| source | python |
|---|---|
watcher.py
|
import sys
import time
import json
import elemental
import threading
import requests
from seleniumwire import webdriver
from selenium.webdriver.support.ui import Select
from subprocess import check_output
from dataclasses import dataclass
from datetime import datetime
import argparse
import gc
from logger import get_logger
logger = get_logger()
_watcher_lock = threading.Lock()
_watcher = None
def run_command(cmd):
out = check_output(cmd, shell=True)
if isinstance(out, bytes):
out = out.decode('utf8')
return out.strip()
def create_timestamp():
return datetime.now().strftime('%m-%d-%Y %H:%M:%S')
class Browser:
def __init__(self):
self.chrome_options = webdriver.ChromeOptions()
self.chrome_options.add_argument("--disable-gpu")
self.chrome_options.add_argument("--disable-software-rasterizer")
self.chrome_options.add_argument("--headless")
self.chrome_options.add_argument("--disable-dev-shm-usage")
self.chrome_options.add_argument("--window-size=1920x1080")
self.chrome_options.add_argument("--disable-setuid-sandbox")
self.chrome_options.add_argument("--no-sandbox")
self.selenium_wire_options = {
'exclude_hosts': ['google-analytics.com', 'facebook.com', 'youtube.com', 'adservice.google.com', 'insight.adsrvr.org']
}
self.exec_path = run_command('which chromedriver')
self._driver = None
self._browser = None
self._calls = 0
def should_reset(self):
if self._calls > 10:
self._reset()
self._calls = 0
self._calls += 1
def _reset(self):
self._create_driver()
self._create_browser()
def _create_driver(self):
if self._driver:
self.close()
self._driver = webdriver.Chrome(options=self.chrome_options, seleniumwire_options=self.selenium_wire_options, executable_path=self.exec_path)
def _create_browser(self):
if self._browser:
return
if not self._driver:
self._create_driver()
self._browser = elemental.Browser(self._driver)
@property
def driver(self):
if not self._driver:
self._create_driver()
return self._driver
@property
def browser(self):
if not self._browser:
self._create_browser()
return self._browser
def close(self):
self._driver.close()
self._driver.quit()
self._driver = None
self._browser = None
gc.collect()
@dataclass
class Config:
city: str = 'Houston'
state: str = 'Texas'
state_abbr: str = 'TX'
zipcode: str = '77056'
_wg_steps = [
'https://www.walgreens.com/',
'https://www.walgreens.com/findcare/vaccination/covid-19?ban=covid_scheduler_brandstory_main_March2021',
]
_avail_links = {
'cvs': 'https://www.cvs.com//vaccine/intake/store/cvd-schedule.html?icid=coronavirus-lp-vaccine-sd-statetool',
'wg': 'https://www.walgreens.com/findcare/vaccination/covid-19?ban=covid_scheduler_brandstory_main_March2021'
}
class VaccineWatcher:
def __init__(self, config, freq_secs=600, hook=None, check_walgreens=True, check_cvs=True, send_data=True, always_send=False, verbose=False):
self.config = Config(**config)
self.freq = freq_secs
self.send_data = send_data
self.always_send = always_send
self.hook = hook
self.verbose = verbose
self._last_status = {'walgreens': {'available': False, 'data': None, 'timestamp': None}, 'cvs': {'available': False, 'data': None, 'timestamp': None}}
self._check_wg = check_walgreens
self._check_cvs = check_cvs
self.api = Browser()
self.browser = self.api.browser
self.alive = True
self.dactive = False
logger.log(f'Initialized VaccineWatcher with {self.config}. Will Check every {self.freq} secs. Walgreens: {self._check_wg}. CVS: {self._check_cvs}\nCall .run() to start daemon')
def _wg_parser(self, resp):
data = json.loads(resp.body.decode('utf-8'))
self._last_status['walgreens']['data'] = data
if data.get('appointmentsAvailable') and data['appointmentsAvailable']:
msg = f'Walgreens has Available Appointments: {data["availabilityGroups"]} for Next {data["days"]} in {data["zipCode"]}, {data["stateCode"]} in {data["radius"]} mile radius'
msg += f'\nPlease Visit: {_avail_links["wg"]} to schedule.'
self._call_hook(msg)
logger.log(msg)
return True
if self.verbose:
msg = f'Result for Walgreens: {data}'
logger.log(msg)
return False
def check_wg(self):
self.browser.visit(_wg_steps[0])
time.sleep(5)
self.browser.visit(_wg_steps[1])
time.sleep(5)
self.browser.get_element(partial_link_text="Schedule new appointment").click()
time.sleep(3)
self.browser.get_input(id="inputLocation").fill(f'{self.config.city} {self.config.state} {self.config.zipcode}')
self.browser.get_button(text="Search").click()
time.sleep(1)
reqs = self.browser.selenium_webdriver.requests
for r in reqs:
if r.response:
if '/hcschedulersvc/svc/v1/immunizationLocations/availability' in r.url:
return self._wg_parser(r.response)
return None
def _cvs_parser(self, resp):
data = json.loads(resp.body.decode('utf-8'))['responsePayloadData']['data'][self.config.state_abbr]
for item in data:
if item['city'] == self.config.city.upper():
self._last_status['cvs']['data'] = item
if item['status'] == 'Available':
msg = f'CVS has Available Appointments in {item["city"]}, {item["state"]}'
msg += f'\nPlease Visit: {_avail_links["cvs"]} to schedule.'
self._call_hook(msg)
logger.log(msg)
return True
if self.verbose:
msg = f'Results for CVS: {item}'
logger.log(msg)
return False
def check_cvs(self):
self.browser.visit('https://www.cvs.com/')
time.sleep(1)
self.browser.get_element(partial_link_text="Schedule a COVID-19 vaccine").click()
self.browser.get_element(id='selectstate').get_element(value=self.config.state_abbr).select()
self.browser.get_button(text="Get started").click()
reqs = self.browser.selenium_webdriver.requests
for r in reqs:
if r.response:
if 'https://www.cvs.com/immunizations/covid-19-vaccine.vaccine-status' in r.url:
return self._cvs_parser(r.response)
return None
def run(self):
if not self.dactive:
t = threading.Thread(target=self._daemon, daemon=True)
t.start()
def last_check(self):
return self._last_status
def _call_hook(self, msg=None):
if not self.hook:
return
if not msg and not self.send_data:
return
if msg and self.send_data:
self.hook(message=msg, data=self.last_check())
elif msg:
self.hook(message=msg)
elif self.always_send:
self.hook(message=None, data=self.last_check())
def _daemon(self):
self.dactive = True
print('Vaccine Watcher Active')
while self.alive:
if self._check_cvs:
self._last_status['cvs']['available'] = self.check_cvs()
self._last_status['cvs']['timestamp'] = create_timestamp()
if self._check_wg:
self._last_status['walgreens']['available'] = self.check_wg()
self._last_status['walgreens']['timestamp'] = create_timestamp()
self._call_hook()
self.api.should_reset()
time.sleep(self.freq)
def __call__(self, check_walgreens=True, check_cvs=True):
res = {}
if check_walgreens:
res['walgreens'] = self.check_wg()
if check_cvs:
res['cvs'] = self.check_cvs()
return res
def __enter__(self):
return self
def close(self):
self.alive = False
self.api.close()
msg = 'Vaccine Watcher is exiting'
self._call_hook(msg)
logger.log(msg)
def __exit__(self, *_):
self.close()
def configure_watcher(**config):
global _watcher
with _watcher_lock:
if _watcher:
return
_watcher = VaccineWatcher(**config)
def get_vaccine_watcher(**config):
configure_watcher(**config)
return _watcher
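# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# get_vaccine_watcher() builds the singleton watcher and .run() starts the
# polling daemon; `hook` may be any callable accepting `message` and `data`
# keyword arguments. The config values below are simply the module defaults.
#
#   def print_hook(message=None, data=None):
#       print(message or data)
#
#   watcher = get_vaccine_watcher(
#       config={'city': 'Houston', 'state': 'Texas', 'state_abbr': 'TX', 'zipcode': '77056'},
#       freq_secs=600, hook=print_hook)
#   watcher.run()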
class ZapierWebhook:
def __init__(self, url):
self.url = url
self.s = requests.Session()
logger.log(f'Initialized Zapier Webhook at {self.url}')
def __call__(self, message=None, data=None):
if not message and not data:
return
params = {}
if message:
params['message'] = message
if data:
params.update(data)
params['timestamp'] = create_timestamp()
r = self.s.post(self.url, json=params)
if r.status_code == 200:
logger.log(f'Successfully sent to Zapier Webhook: {params}')
else:
logger.log(f'Potential Error sending to Zapier Webhook')
def cli():
parser = argparse.ArgumentParser(description='Vaccine Watcher CLI')
parser.add_argument('--city', dest='city', type=str, default="Houston", help='Full name of your City.')
parser.add_argument('--state', dest='state', type=str, default="Texas", help='Full name of your State.')
parser.add_argument('--abbr', dest='state_abbr', type=str, default="TX", help='State Abbreviation')
parser.add_argument('--zip', dest='zipcode', type=str, default="77056", help='Your nearest Zipcode')
parser.add_argument('--freq', dest='freq', type=int, default=600, help='Seconds between refreshes')
parser.add_argument('--zapier', dest='zapierhook', type=str, default=None, help='A Zapier Webhook URL to Send Messages/Notifications')
parser.add_argument('--no-cvs', dest='cvs', default=True, action='store_false', help='Disable CVS Search.')
parser.add_argument('--no-wg', dest='wg', default=True, action='store_false', help='Disable Walgreens Search.')
parser.add_argument('--verbose', dest='verbose', default=False, action='store_true', help='Enable verbosity. Will log results regardless of status')
args = parser.parse_args()
params = {'city': args.city.capitalize(), 'state': args.state.capitalize(), 'state_abbr': args.state_abbr.upper(), 'zipcode': args.zipcode}
hook = None
if args.zapierhook:
hook = ZapierWebhook(args.zapierhook)
watcher = get_vaccine_watcher(config=params, freq_secs=args.freq, hook=hook, check_walgreens=args.wg, check_cvs=args.cvs, verbose=args.verbose)
watcher.run()
while True:
try:
time.sleep(60)
except KeyboardInterrupt:
logger.info('Exiting due to Keyboard Interrupt')
watcher.close()
sys.exit()
except Exception as e:
watcher.close()
logger.info(f'Exiting Due to Error: {str(e)}')
sys.exit()
if __name__ == '__main__':
cli()
|
rqt_rosbag_control.py
|
#!/usr/bin/env python
"""
RQT Plugin to control rosbag playback
"""
import os
import time
import threading
from python_qt_binding import loadUi # pylint: disable=import-error
from python_qt_binding.QtGui import QPixmap, QIcon # pylint: disable=no-name-in-module, import-error
from python_qt_binding.QtWidgets import QWidget # pylint: disable=no-name-in-module, import-error
from qt_gui.plugin import Plugin # pylint: disable=import-error
import rclpy
from ament_index_python.packages import get_package_share_directory
from rosgraph_msgs.msg import Clock
from rosbag2_interfaces.srv import IsPaused, PlayNext, Pause, TogglePaused
class RosbagControlPlugin(Plugin):
"""
RQT Plugin to control rosbag playback
"""
def __init__(self, context):
"""
Constructor
"""
super(RosbagControlPlugin, self).__init__(context)
self.setObjectName('Rosbag Control')
# Set UI
self._widget = QWidget()
package_share_dir = get_package_share_directory('rqt_rosbag_control')
ui_file = os.path.join(package_share_dir, 'resource', 'RosbagControl.ui')
loadUi(ui_file, self._widget)
self._widget.setObjectName('RosbagControl')
if context.serial_number() > 1:
self._widget.setWindowTitle(
self._widget.windowTitle() + (' (%d)' % context.serial_number()))
self.pause_icon = QIcon(
QPixmap(os.path.join(
package_share_dir, 'resource', 'pause.png')))
self.play_icon = QIcon(
QPixmap(os.path.join(
package_share_dir, 'resource', 'play.png')))
self._widget.pushButtonStepOnce.setIcon(
QIcon(QPixmap(os.path.join(
package_share_dir, 'resource', 'step_once.png'))))
self._widget.pushButtonPause.setIcon(self.pause_icon)
# self._widget.pushButtonPlayPause.setDisabled(True)
# self._widget.pushButtonStepOnce.setDisabled(True)
self._widget.pushButtonPlayPause.setIcon(self.play_icon)
self._widget.pushButtonPlayPause.clicked.connect(self.toggle_play_pause)
self._widget.pushButtonStepOnce.clicked.connect(self.step_once)
self._widget.pushButtonPause.clicked.connect(self.pause)
context.add_widget(self._widget)
# Set ROS services and clock subscriber
self._node = rclpy.create_node('rqt_rosbag_control_node')
self._is_paused_client = self._node.create_client(IsPaused, '/rosbag2_player/is_paused')
self._pause_client = self._node.create_client(Pause, '/rosbag2_player/pause')
self._play_next_client = self._node.create_client(PlayNext, '/rosbag2_player/play_next')
self._toggle_paused_client = self._node.create_client(TogglePaused, '/rosbag2_player/toggle_paused')
self.clock_subscriber = self._node.create_subscription(Clock, "/clock", self.clock_sub, 10)
self._running = True
self._spin_thread = threading.Thread(target=self.spin)
self._spin_thread.start()
self._update_paused_thread = threading.Thread(target=self.update_paused)
self._update_paused_thread.start()
def toggle_play_pause(self):
""" Toggle play/pause """
print("Play/Pause")
if not self._toggle_paused_client.wait_for_service(timeout_sec=5.0):
print("WARNING: toggle_paused service not available. Is rosbag player running?")
return
req = TogglePaused.Request()
future = self._toggle_paused_client.call_async(req)
def step_once(self):
""" Execute one step """
print("Play next")
if not self._play_next_client.wait_for_service(timeout_sec=5.0):
print("WARNING: play_next service not available. Is rosbag player running?")
return
req = PlayNext.Request()
future = self._play_next_client.call_async(req)
def pause(self):
""" Pause bagfile """
print("Pause")
if not self._pause_client.wait_for_service(timeout_sec=5.0):
print("WARNING: pause service not available. Is rosbag player running?")
return
req = Pause.Request()
future = self._pause_client.call_async(req)
def clock_sub(self, clock_msg):
""" /clock subscriber """
clock_str = '{s}.{n}'.format(s=clock_msg.clock.sec, n=clock_msg.clock.nanosec)
self._widget.labelTimeData.setText(clock_str)
def set_paused_callback(self, future):
print("set_paused_callback")
result = future.result()
print(F"result: {result}")
self._widget.labelPausedData.setText(str(result.paused))
# If not paused, disable play next button (can only be done while playback is paused)
self._widget.pushButtonStepOnce.setDisabled(not result.paused)
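# Note: set_paused_callback is not referenced elsewhere in this file; it is
# presumably meant to be attached to a call_async() future via
# future.add_done_callback() (an assumption, not stated in the original).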
def update_paused(self):
while rclpy.ok() and self._running:
if not self._is_paused_client.wait_for_service(timeout_sec=5.0):
print("WARNING: is_paused service not available. Is rosbag player running?")
return
req = IsPaused.Request()
result = self._is_paused_client.call(req)
self._widget.labelPausedData.setText(str(result.paused))
# If not paused, disable play next button (can only be done while playback is paused)
self._widget.pushButtonStepOnce.setDisabled(not result.paused)
# Update play/Pause icon
if result.paused:
self._widget.pushButtonPlayPause.setIcon(self.play_icon)
else:
self._widget.pushButtonPlayPause.setIcon(self.pause_icon)
time.sleep(0.05)
def spin(self):
"""
Spin the node and update the state of
the playback (paused/unpaused) via service call
"""
while rclpy.ok() and self._running:
rclpy.spin_once(self._node, timeout_sec=1.0)
time.sleep(0.01)
def shutdown_plugin(self):
""" shutdown plugin """
self._running = False
self._spin_thread.join()
self._update_paused_thread.join()
self._node.destroy_node()
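# --- Illustrative sketch (not part of the plugin) ---
# The plugin drives the standard rosbag2 player services declared above; with a
# bag playing via `ros2 bag play <bag>`, the same controls can be exercised from
# the command line, for example:
#
#   ros2 service call /rosbag2_player/toggle_paused rosbag2_interfaces/srv/TogglePaused "{}"
#   ros2 service call /rosbag2_player/play_next rosbag2_interfaces/srv/PlayNext "{}"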
|
main_test_alone.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import parl
import time
import threading
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.remote.client import disconnect
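# The tests below import helper modules that sit next to this file (Module2.py
# and subdir/Module.py). For context, a minimal sketch of what Module2.B is
# assumed to look like -- an actor class exposing add_sum -- is shown here; this
# is an illustration, not the actual helper file:
#
#   import parl
#
#   @parl.remote_class
#   class B(object):
#       def add_sum(self, a, b):
#           return a + b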
class TestImport(unittest.TestCase):
def tearDown(self):
disconnect()
def test_import_local_module(self):
from Module2 import B
port = 8442
master = Master(port=port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker = Worker('localhost:{}'.format(port), 1)
time.sleep(10)
parl.connect("localhost:8442")
obj = B()
res = obj.add_sum(10, 5)
self.assertEqual(res, 15)
worker.exit()
master.exit()
def test_import_subdir_module_0(self):
from subdir import Module
port = 8443
master = Master(port=port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker = Worker('localhost:{}'.format(port), 1)
time.sleep(10)
parl.connect(
"localhost:8443",
distributed_files=[
os.path.join('subdir', 'Module.py'),
os.path.join('subdir', '__init__.py')
])
obj = Module.A()
res = obj.add_sum(10, 5)
self.assertEqual(res, 15)
worker.exit()
master.exit()
def test_import_subdir_module_1(self):
from subdir.Module import A
port = 8444
master = Master(port=port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker = Worker('localhost:{}'.format(port), 1)
time.sleep(10)
parl.connect(
"localhost:8444",
distributed_files=[
os.path.join('subdir', 'Module.py'),
os.path.join('subdir', '__init__.py')
])
obj = A()
res = obj.add_sum(10, 5)
self.assertEqual(res, 15)
worker.exit()
master.exit()
if __name__ == '__main__':
unittest.main()
|
bucketprocessor.py
|
from threading import Lock
from threading import Thread
from threading import Condition
from framerate import FrameRate
from frameduration import FrameDuration
class BucketProcessor:
def __init__(self,stream,ipdictionary, ipselection):
print("Creating BucketProcessor for " + stream.name)
self._lock = Lock()
self._condition = Condition()
self.fps = FrameRate()
self.duration = FrameDuration()
self.stream = stream
self.name = self.stream.name
self.ipdictionary = ipdictionary
self.ipselection = ipselection
self.ip = self.ipdictionary[ipselection]
self._frame = None
self.frame = None
self.count = 0
self.isNew = False
# initialize the variable used to indicate if the thread should
# be stopped
self._stop = False
self.stopped = True
print("BucketProcessor created for " + self.name)
def start(self):
print("STARTING BucketProcessor for " + self.name)
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
print("BucketProcessor for " + self.name + " RUNNING")
# keep looping infinitely until the thread is stopped
self.stopped = False
self.fps.start()
lastIpSelection = self.ipselection
while True:
# if the thread indicator variable is set, stop the thread
if (self._stop == True):
self._stop = False
self.stopped = True
return
# otherwise, read the next frame from the stream
# grab the frame from the threaded video stream
(self._frame, count, timestamp, isNew) = self.stream.read()
self.duration.start()
self.fps.update()
if (lastIpSelection != self.ipselection):
self.ip = self.ipdictionary[self.ipselection]
lastIpSelection = self.ipselection
if (isNew == True):
# TODO: Insert processing code then forward display changes
self.ip.process(self._frame)
# Now that image processing is complete, place results
# into an outgoing buffer to be grabbed at the convenience
# of the reader
self._condition.acquire()
self._lock.acquire()
self.count = count
self.isNew = isNew
self.frame = self._frame
self.timestamp = timestamp
self._lock.release()
self._condition.notify_all()
self._condition.release()
self.duration.update()
print("BucketProcessor for " + self.name + " STOPPING")
def updateSelection(self, ipselection):
self.ipselection = ipselection
def read(self):
# return the frame most recently processed if the frame
# is not being updated at this exact moment
self._condition.acquire()
self._condition.wait()
self._condition.release()
if self._lock.acquire(False):  # non-blocking: skip if the producer currently holds the lock
self.outFrame = self.frame
self.outCount = self.count
self.outTimestamp = self.timestamp
self._lock.release()
return (self.outFrame, self.outCount, self.outTimestamp, True)
else:
return (self.outFrame, self.outCount, "No Time Stamp", False)
def stop(self):
# indicate that the thread should be stopped
self._stop = True
self._condition.acquire()
self._condition.notify_all()
self._condition.release()
def isStopped(self):
return self.stopped
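if __name__ == '__main__':
    # Minimal smoke test (a sketch; the real capture-stream and image-processor
    # classes are not part of this module). The stubs below only mimic the
    # expected interfaces: stream.read() -> (frame, count, timestamp, isNew)
    # and processor.process(frame).
    import time

    class _StubStream:
        name = 'stub'
        def __init__(self):
            self._count = 0
        def read(self):
            time.sleep(0.01)
            self._count += 1
            return ('frame-%d' % self._count, self._count, time.time(), True)

    class _StubProcessor:
        def process(self, frame):
            pass

    proc = BucketProcessor(_StubStream(), {'stub': _StubProcessor()}, 'stub').start()
    print(proc.read())
    proc.stop()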
|
flovis.py
|
# coding=utf-8
import websocket
import socket
import ssl
import json
import time
import uuid
from threading import Thread
from helpers import log
class Flovis:
def __init__(self, host):
self.host = host
initialized = False
attempts = 0
while not initialized and attempts < 5:
initialized = self._init_websocket()
if not initialized:
time.sleep(1)
attempts += 1
def _init_websocket(self):
def on_message(ws, frame):
msg = json.loads(frame)
if 'action' in msg:
if msg['action'] == 'ping':
ws.send(json.dumps({'action': 'pong'}))
elif msg['action'] == 'response':
if msg['success'] is False:
log('warning', 'Flovis data send failed ({}): {}'.format(msg['event_id'], msg['code']))
else:
ws.send(json.dumps({'action': 'info', 'message': "LA LA LA I'M NOT LISTENING"}))
def on_close(_ws):
self._init_websocket()
try:
self.ws = websocket.WebSocketApp(self.host, on_message=on_message, on_close=on_close)
def run():
try:
self.ws.run_forever()
except websocket._exceptions.WebSocketConnectionClosedException:
log('error', 'Flovis websocket closed unexpectedly, assuming problems and nullifying ws')
except websocket._exceptions.WebSocketException as e:
if "socket is already opened" not in str(e):
raise
except (AttributeError, OSError) as e:
log('error', str(e))
finally:
try:
if self.ws and self.ws.sock:
self.ws.sock.close()
except websocket.WebSocketException:
pass
self.ws = None
flovis_t = Thread(name='flovis_websocket', target=run)
flovis_t.start()
return True
except (websocket._exceptions.WebSocketBadStatusException, socket.gaierror) as e:
log('error', e)
self.ws = None
return False
def stage(self, name, site, post_id, data=None):
event_id = str(uuid.uuid4())
msg_data = {'action': 'stage', 'name': name, 'site': site, 'post_id': post_id, 'event_id': event_id}
if data is not None:
msg_data['data'] = data
for retries in range(1, 5):
try:
if self.ws is not None:
self.ws.send(json.dumps(msg_data))
break
except (websocket.WebSocketConnectionClosedException, ssl.SSLError):
if retries == 4:
raise # Actually raise the initial error if we've exceeded number of init retries
self.ws = None
self._init_websocket()
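# --- Illustrative usage (a sketch; the host URL and stage name are placeholders,
# not values taken from the original code) ---
#   flovis = Flovis('wss://example.invalid/flovis')
#   flovis.stage('post_scanned', site='example.site', post_id=12345,
#                data={'reasons': []})
# Each stage() call tags the event with a fresh UUID and falls back to
# re-initialising the websocket if the connection has been dropped.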
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import string
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
MAIN_TIMEOUT = 60.0
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
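# For illustration: a raw frame matching this layout can be built with
#   struct.pack(can_frame_fmt, can_id, len(data), data.ljust(8, b'\x00'))
# and decoded with struct.unpack(can_frame_fmt, frame) to recover
# (can_id, can_dlc, padded_data).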
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp().
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# every returned address entry is supposed to be a sequence of length 5
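# (the five fields are family, type, proto, canonname and sockaddr)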
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd, because on this path it doesn't actually verify the family and
# type; it just populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
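# Illustrative note (assuming the usual "=IB3x8s" can_frame_fmt layout:
# 32-bit can_id, 8-bit can_dlc, 3 pad bytes, 8 data bytes):
# build_can_frame(0x123, b'\x01\x02') yields a 16-byte frame with
# can_dlc == 2 and the payload zero-padded to 8 bytes, and
# dissect_can_frame() on that frame recovers (0x123, 2, b'\x01\x02').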
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
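# As a purely illustrative (hypothetical) sketch of that composition, a
# concrete recvmsg_into()-over-datagram test class would be assembled
# roughly like this:
#
#     class RecvmsgIntoSomeDgramTest(RecvmsgIntoMixin,
#                                    SendrecvmsgDgramFlagsBase,
#                                    RecvmsgGenericTests,
#                                    SomeDgramSocketTestBase):
#         pass
#
# where SomeDgramSocketTestBase stands in for whichever SocketTestBase
# subclass sets up the client and server sockets for the family under test.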
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
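# For example, checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC)
# requires the end-of-record indicator bits to be set and MSG_CTRUNC to be
# unset, in addition to whatever the common set/unset attributes require.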
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
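# Convert the recvmsg_into() result (nbytes, ancdata, flags, addr) into
# a recvmsg()-style (data, ancdata, flags, addr) tuple.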
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937: the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
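# The assertions below rely on (and check) the relationships
# CMSG_LEN(n) == CMSG_LEN(0) + n (fixed header plus n data bytes) and
# CMSG_SPACE(n) >= CMSG_LEN(n) (CMSG_SPACE() adds alignment padding),
# with both values bounded above by socklen_t_limit.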
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
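# The FD-passing tests below ultimately issue sendmsg() calls of the form
# (see createAndSendFDs()):
#
#     sock.sendmsg([MSG], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
#                           array.array("i", fds))])
#
# and the receiving side unpacks the SCM_RIGHTS cmsg_data back into an
# array of ints via checkRecvmsgFDs()/closeRecvmsgFDs().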
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
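        # Let the client (which waits on misc_event) know that
        # IPV6_RECVHOPLIMIT is now enabled, so it is safe to send.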
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
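        # Either TCLASS or HOPLIMIT may be delivered first; accept whichever
        # item survived the truncation intact.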
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
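# Each concrete class below mixes a send/recv test mixin into a
# transport-specific base, and is skipped when the required socket.socket
# methods or address families are unavailable on the platform.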
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a SIGALRM
    # handler that raises ZeroDivisionError, and restores the original
    # handler on teardown, along with cancelling any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError when
        # the SIGALRM handler fires while the call is blocked on a timeout.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError when the SIGALRM handler interrupts it.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
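    # The threaded framework pairs each testX with a client-side _testX;
    # reuse the no-op client half here, with the same cpython_only skip.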
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
        conn = None
        self.serv.settimeout(10)
        try:
            conn, addr = self.serv.accept()
            message = conn.recv(len(MSG))
        finally:
            if conn is not None:
                conn.close()
            self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
# connect() didn't start: non-blocking accept() fails
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
self.event.set()
read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(0)
        # the client hasn't sent any data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the client has now sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
        self.read_file = self.cli_conn.makefile(
            self.read_mode, self.bufsize,
            encoding=self.encoding,
            errors=self.errors,
            newline=self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
        self.write_file = self.serv_conn.makefile(
            self.write_mode, self.bufsize,
            encoding=self.encoding,
            errors=self.errors,
            newline=self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
    def testClosedAttr(self):
        self.assertFalse(self.read_file.closed)
    def _testClosedAttr(self):
        self.assertFalse(self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
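        # makefile() holds a reference to the socket, so closing the file
        # object should drop exactly one reference.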
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
            # Data hasn't arrived yet (can happen on Windows); wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
if hasattr(errno, 'EADDRNOTAVAIL'):
# bpo-31910: socket.create_connection() fails randomly
# with EADDRNOTAVAIL on Travis CI
expected_errnos.append(errno.EADDRNOTAVAIL)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
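    # Each attribute test below reuses _justAccept as its server half; the
    # actual assertions run in the client-side _test* methods.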
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
        self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
        self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
        self.assertIsNone(self.cli.gettimeout())
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
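    # The server side is identical for both timeout tests; only the client
    # halves below differ.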
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
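        # sock shares sock0's (now closed) fd, so only detach it on cleanup
        # rather than closing it a second time.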
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
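        # A leading NUL byte places the name in the Linux abstract socket
        # namespace, so no filesystem entry is created.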
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
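# Arbitrary TIPC service type and name-sequence range used by the TIPC
# tests below.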
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory, or if we
        # don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# Socket sharing is expected to work only for blocking sockets,
# since the internal python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10MB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(self.TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=0.01) as sock, \
file as file:
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(thread, 'Threading required for this test.')
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
generate_breakpad_symbols.py
|
#!/usr/bin/env python
# Copyright (c) 2013 GitHub, Inc. All rights reserved.
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to generate symbols for a binary suitable for breakpad.
Currently, the tool only supports Linux, Android, and Mac. Support for other
platforms is planned.
"""
import errno
import optparse
import os
import Queue
import re
import shutil
import subprocess
import sys
import threading
CONCURRENT_TASKS=4
def GetCommandOutput(command):
"""Runs the command list, returning its output.
Prints the given command (which should be a list of one or more strings),
then runs it and returns its output (stdout) as a string.
From chromium_utils.
"""
devnull = open(os.devnull, 'w')
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=devnull,
bufsize=1)
output = proc.communicate()[0]
return output
def GetDumpSymsBinary(build_dir=None):
"""Returns the path to the dump_syms binary."""
DUMP_SYMS = 'dump_syms'
dump_syms_bin = os.path.join(os.path.expanduser(build_dir), DUMP_SYMS)
if not os.access(dump_syms_bin, os.X_OK):
print 'Cannot find %s.' % DUMP_SYMS
sys.exit(1)
return dump_syms_bin
def FindBundlePart(full_path):
if full_path.endswith(('.dylib', '.framework', '.app')):
return os.path.basename(full_path)
elif full_path != '' and full_path != '/':
return FindBundlePart(os.path.dirname(full_path))
else:
return ''
def GetDSYMBundle(options, binary_path):
"""Finds the .dSYM bundle to the binary."""
if binary_path[0] == '/' or binary_path == '':
return binary_path
filename = FindBundlePart(binary_path)
search_dirs = [options.build_dir, options.libchromiumcontent_dir]
if filename.endswith(('.dylib', '.framework', '.app')):
for directory in search_dirs:
dsym_path = os.path.join(directory, filename) + '.dSYM'
if os.path.exists(dsym_path):
return dsym_path
return binary_path
def GetSymbolPath(options, binary_path):
"""Finds the .dbg to the binary."""
filename = os.path.basename(binary_path)
dbg_path = os.path.join(options.libchromiumcontent_dir, filename) + '.dbg'
if os.path.exists(dbg_path):
return dbg_path
return binary_path
def Resolve(path, exe_path, loader_path, rpaths):
"""Resolve a dyld path.
@executable_path is replaced with |exe_path|
@loader_path is replaced with |loader_path|
@rpath is replaced with the first path in |rpaths| where the referenced file
is found
"""
path = path.replace('@loader_path', loader_path)
path = path.replace('@executable_path', exe_path)
if path.find('@rpath') != -1:
for rpath in rpaths:
new_path = Resolve(path.replace('@rpath', rpath), exe_path, loader_path,
[])
if os.access(new_path, os.F_OK):
return new_path
return ''
return path
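# Illustrative sketch (not called anywhere in this tool): how Resolve() expands
# the dyld placeholders documented above. The paths here are hypothetical.
def _resolve_example():
    # Resolve() swaps '@loader_path' for the loader_path argument, so this
    # returns '/App/Frameworks/libfoo.dylib'.
    return Resolve('@loader_path/libfoo.dylib',
                   exe_path='/App/MacOS',
                   loader_path='/App/Frameworks',
                   rpaths=[])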
def GetSharedLibraryDependenciesLinux(binary):
"""Return absolute paths to all shared library dependecies of the binary.
This implementation assumes that we're running on a Linux system."""
ldd = GetCommandOutput(['ldd', binary])
lib_re = re.compile('\t.* => (.+) \(.*\)$')
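# Matches ldd output lines such as (illustrative example line):
#   "\tlibm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007f9c...)"
# capturing the resolved library path after '=>'.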
result = []
for line in ldd.splitlines():
m = lib_re.match(line)
if m:
result.append(m.group(1))
return result
def GetSharedLibraryDependenciesMac(binary, exe_path):
"""Return absolute paths to all shared library dependecies of the binary.
This implementation assumes that we're running on a Mac system."""
loader_path = os.path.dirname(binary)
otool = GetCommandOutput(['otool', '-l', binary]).splitlines()
rpaths = []
for idx, line in enumerate(otool):
if line.find('cmd LC_RPATH') != -1:
m = re.match(' *path (.*) \(offset .*\)$', otool[idx+2])
rpaths.append(m.group(1))
otool = GetCommandOutput(['otool', '-L', binary]).splitlines()
lib_re = re.compile('\t(.*) \(compatibility .*\)$')
deps = []
for line in otool:
m = lib_re.match(line)
if m:
dep = Resolve(m.group(1), exe_path, loader_path, rpaths)
if dep:
deps.append(os.path.normpath(dep))
return deps
def GetSharedLibraryDependencies(options, binary, exe_path):
"""Return absolute paths to all shared library dependecies of the binary."""
deps = []
if sys.platform.startswith('linux'):
deps = GetSharedLibraryDependenciesLinux(binary)
elif sys.platform == 'darwin':
deps = GetSharedLibraryDependenciesMac(binary, exe_path)
else:
print "Platform not supported."
sys.exit(1)
result = []
build_dir = os.path.abspath(options.build_dir)
for dep in deps:
if (os.access(dep, os.F_OK)):
result.append(dep)
return result
def mkdir_p(path):
"""Simulates mkdir -p."""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def GenerateSymbols(options, binaries):
"""Dumps the symbols of binary and places them in the given directory."""
queue = Queue.Queue()
print_lock = threading.Lock()
def _Worker():
while True:
binary = queue.get()
if options.verbose:
with print_lock:
print "Generating symbols for %s" % binary
if sys.platform == 'darwin':
binary = GetDSYMBundle(options, binary)
elif sys.platform == 'linux2':
binary = GetSymbolPath(options, binary)
syms = GetCommandOutput([GetDumpSymsBinary(options.build_dir), '-r', '-c',
binary])
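# dump_syms output starts with a breakpad header line of the form
# "MODULE <os> <arch> <debug-id> <debug-file>"; group(1) below is the id and
# group(2) the name, which determine the symbol-store directory layout.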
module_line = re.match("MODULE [^ ]+ [^ ]+ ([0-9A-F]+) (.*)\n", syms)
output_path = os.path.join(options.symbols_dir, module_line.group(2),
module_line.group(1))
mkdir_p(output_path)
symbol_file = "%s.sym" % module_line.group(2)
f = open(os.path.join(output_path, symbol_file), 'w')
f.write(syms)
f.close()
queue.task_done()
for binary in binaries:
queue.put(binary)
for _ in range(options.jobs):
t = threading.Thread(target=_Worker)
t.daemon = True
t.start()
queue.join()
def main():
parser = optparse.OptionParser()
parser.add_option('', '--build-dir', default='',
help='The build output directory.')
parser.add_option('', '--symbols-dir', default='',
help='The directory where to write the symbols file.')
parser.add_option('', '--libchromiumcontent-dir', default='',
help='The directory where libchromiumcontent is downloaded.')
parser.add_option('', '--binary', default='',
help='The path of the binary to generate symbols for.')
parser.add_option('', '--clear', default=False, action='store_true',
help='Clear the symbols directory before writing new '
'symbols.')
parser.add_option('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
type='int', help='Number of parallel tasks to run.')
parser.add_option('-v', '--verbose', action='store_true',
help='Print verbose status output.')
(options, _) = parser.parse_args()
if not options.symbols_dir:
print "Required option --symbols-dir missing."
return 1
if not options.build_dir:
print "Required option --build-dir missing."
return 1
if not options.libchromiumcontent_dir:
print "Required option --libchromiumcontent-dir missing."
return 1
if not options.binary:
print "Required option --binary missing."
return 1
if not os.access(options.binary, os.X_OK):
print "Cannot find %s." % options.binary
return 1
if options.clear:
try:
shutil.rmtree(options.symbols_dir)
except:
pass
# Build the transitive closure of all dependencies.
binaries = set([options.binary])
queue = [options.binary]
exe_path = os.path.dirname(options.binary)
while queue:
deps = GetSharedLibraryDependencies(options, queue.pop(0), exe_path)
new_deps = set(deps) - binaries
binaries |= new_deps
queue.extend(list(new_deps))
GenerateSymbols(options, binaries)
return 0
if '__main__' == __name__:
sys.exit(main())
|
udp_shotgun.py
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import socket
import json
import random
import itertools
import time
from sys import stdout
from threading import Thread
from SocketServer import BaseRequestHandler, UDPServer
from mbed_host_tests import BaseHostTest, event_callback
class UDPEchoClientHandler(BaseRequestHandler):
def handle(self):
""" UDP packet handler. Responds with multiple simultaneous packets
"""
data, sock = self.request
pattern = [ord(d) << 4 for d in data]
# Each byte in the request indicates the size of a packet to receive.
# Each packet size is shifted over by 4 to fit in a byte, which
# avoids any issues with endianness or decoding.
for packet in pattern:
data = [random.randint(0, 255) for _ in range(packet-1)]
data.append(reduce(lambda a,b: a^b, data))
data = ''.join(map(chr, data))
sock.sendto(data, self.client_address)
# Sleep a tiny bit to compensate for local network
time.sleep(0.01)
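# Illustrative helper (not used by this host test): the handler above appends an
# XOR checksum byte to each packet, so a receiver can validate a packet simply by
# XOR-ing every byte together and checking that the result is zero.
def udp_packet_is_valid(packet):
    checksum = 0
    for byte in packet:
        checksum ^= ord(byte)
    return checksum == 0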
class UDPEchoClientTest(BaseHostTest):
def __init__(self):
"""
Initialise test parameters.
:return:
"""
BaseHostTest.__init__(self)
self.SERVER_IP = None # Will be determined after knowing the target IP
self.SERVER_PORT = 0 # Let UDPServer choose an arbitrary port
self.server = None
self.server_thread = None
self.target_ip = None
@staticmethod
def find_interface_to_target_addr(target_ip):
"""
Finds IP address of the interface through which it is connected to the target.
:return:
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
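# connect() on a UDP socket sends no packets; it only makes the kernel pick a
# route, so getsockname() below reports the local interface address that would
# be used to reach the target.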
try:
s.connect((target_ip, 0)) # Target IP, any port
except socket.error:
s.connect((target_ip, 8000)) # Target IP, 'random' port
ip = s.getsockname()[0]
s.close()
return ip
def setup_udp_server(self):
"""
sets up a UDP server for target to connect and send test data.
:return:
"""
# !NOTE: There should be a mechanism to assert failure in the host test
if self.SERVER_IP is None:
self.log("setup_udp_server() called before determining server IP!")
self.notify_complete(False)
# Returning none will suppress host test from printing success code
self.server = UDPServer((self.SERVER_IP, self.SERVER_PORT), UDPEchoClientHandler)
ip, port = self.server.server_address
self.SERVER_PORT = port
self.server.allow_reuse_address = True
self.log("HOST: Listening for UDP packets: " + self.SERVER_IP + ":" + str(self.SERVER_PORT))
self.server_thread = Thread(target=UDPEchoClientTest.server_thread_func, args=(self,))
self.server_thread.start()
@staticmethod
def server_thread_func(this):
"""
Thread function to run the UDP server forever.
:param this:
:return:
"""
this.server.serve_forever()
@event_callback("target_ip")
def _callback_target_ip(self, key, value, timestamp):
"""
Callback to handle reception of target's IP address.
:param key:
:param value:
:param timestamp:
:return:
"""
self.target_ip = value
self.SERVER_IP = self.find_interface_to_target_addr(self.target_ip)
self.setup_udp_server()
@event_callback("host_ip")
def _callback_host_ip(self, key, value, timestamp):
"""
Callback for request for host IP Addr
"""
self.send_kv("host_ip", self.SERVER_IP)
@event_callback("host_port")
def _callback_host_port(self, key, value, timestamp):
"""
Callback for request for host port
"""
self.send_kv("host_port", self.SERVER_PORT)
def teardown(self):
if self.server:
self.server.shutdown()
self.server_thread.join()
|
train_abstractive.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
# from pytorch_transformers import BertTokenizer
from transformers import BertTokenizer, AutoTokenizer
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.loss import abs_loss
from models.model_builder import AbsSummarizer
from models.predictor import build_predictor
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',
'dec_layers', 'dec_hidden_size', 'dec_ff_size', 'encoder', 'ff_actv', 'use_interval']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
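# Illustrative use with argparse (hypothetical flag, not necessarily one this
# script defines):
#   parser.add_argument('--use_bert_emb', type=str2bool, nargs='?', const=True, default=False)
# so that 'yes'/'no', 'true'/'false', 't'/'f', 'y'/'n' and '1'/'0' are all accepted.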
def train_abs_multi(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train_abs_single(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
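# Flow: a failing child puts (rank, traceback) on error_queue (see run() above),
# error_listener() re-queues it and raises SIGUSR1 in this parent process, and
# signal_handler() then SIGINTs the remaining children and re-raises the traceback.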
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def validate_abs(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
if (args.test_start_from != -1 and step < args.test_start_from):
xent_lst.append((1e6, cp))
continue
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:5]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_abs(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_abs(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
if args.bart:
tokenizer = AutoTokenizer.from_pretrained('/home/ybai/downloads/bart', do_lower_case=True,
cache_dir=args.temp_dir, local_files_only=False)
symbols = {'BOS': tokenizer.encoder['madeupword0000'], 'EOS': tokenizer.encoder['madeupword0001'],
'PAD': tokenizer.encoder['<pad>'], 'EOQ': tokenizer.encoder['madeupword0002']}
else:
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased', do_lower_case=True,
cache_dir=args.temp_dir, local_files_only=True)
symbols = {'BOS': tokenizer.vocab['[unused1]'], 'EOS': tokenizer.vocab['[unused2]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused3]']}
valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device)
trainer = build_trainer(args, device_id, model, None, valid_loss)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_abs(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
# For Chinese tokenization: register the '[unusedN]' markers as special tokens
# so the tokenizer keeps them as single tokens instead of splitting them apart.
add_token_list = ['[unused1]', '[unused2]', '[unused3]', '[unused4]', '[unused5]']
if args.bart:
tokenizer = AutoTokenizer.from_pretrained('bart-base', do_lower_case=True, cache_dir=args.temp_dir, local_files_only=False)
symbols = {'BOS': tokenizer.encoder['madeupword0000'], 'EOS': tokenizer.encoder['madeupword0001'],
'PAD': 0, 'EOQ': tokenizer.encoder['madeupword0002']}
else:
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased', do_lower_case=True,
cache_dir=args.temp_dir, local_files_only=False, additional_special_tokens=add_token_list)
symbols = {'BOS': tokenizer.vocab['[unused1]'], 'EOS': tokenizer.vocab['[unused2]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused3]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def test_text_abs(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = AbsSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
# for chinese tokenization
add_token_list = ['[unused1]', '[unused2]', '[unused3]', '[unused4]', '[unused5]']
if args.bart:
tokenizer = AutoTokenizer.from_pretrained('bart-base', do_lower_case=True, cache_dir=args.temp_dir, local_files_only=False)
# tokenizer = AutoTokenizer.from_pretrained('/home/ybai/downloads/bart', do_lower_case=True,
# cache_dir=args.temp_dir, local_files_only=False)
symbols = {'BOS': tokenizer.encoder['madeupword0000'], 'EOS': tokenizer.encoder['madeupword0001'],
'PAD': tokenizer.encoder['<pad>'], 'EOQ': tokenizer.encoder['madeupword0002']}
else:
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased', do_lower_case=True,
cache_dir=args.temp_dir, local_files_only=False, additional_special_tokens=add_token_list)
symbols = {'BOS': tokenizer.vocab['[unused1]'], 'EOS': tokenizer.vocab['[unused2]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused3]']}
predictor = build_predictor(args, tokenizer, symbols, model, logger)
predictor.translate(test_iter, step)
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, 'cpu',
shuffle=False, is_test=True)
trainer = build_trainer(args, '-1', None, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train_abs(args, device_id):
if (args.world_size > 1):
train_abs_multi(args)
else:
train_abs_single(args, device_id)
def train_abs_single(args, device_id):
init_logger(args.log_file)
logger.info(str(args))
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
if (args.load_from_extractive != ''):
logger.info('Loading bert from extractive model %s' % args.load_from_extractive)
bert_from_extractive = torch.load(args.load_from_extractive, map_location=lambda storage, loc: storage)
bert_from_extractive = bert_from_extractive['model']
else:
bert_from_extractive = None
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = AbsSummarizer(args, device, checkpoint, bert_from_extractive)
if (args.sep_optim):
optim_bert = model_builder.build_optim_bert(args, model, checkpoint)
optim_dec = model_builder.build_optim_dec(args, model, checkpoint)
optim = [optim_bert, optim_dec]
else:
optim = [model_builder.build_optim(args, model, checkpoint)]
logger.info(model)
if args.bart:
tokenizer = AutoTokenizer.from_pretrained('bart-base', do_lower_case=True, cache_dir=args.temp_dir, local_files_only=False)
# tokenizer = AutoTokenizer.from_pretrained('/home/ybai/downloads/bart', do_lower_case=True,
# cache_dir=args.temp_dir, local_files_only=False)
symbols = {'BOS': tokenizer.encoder['madeupword0000'], 'EOS': tokenizer.encoder['madeupword0001'],
'PAD': tokenizer.encoder['<pad>'], 'EOQ': tokenizer.encoder['madeupword0002']}
else:
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased', do_lower_case=True,
cache_dir=args.temp_dir, local_files_only=False)
symbols = {'BOS': tokenizer.vocab['[unused1]'], 'EOS': tokenizer.vocab['[unused2]'],
'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused3]']}
train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,
label_smoothing=args.label_smoothing)
trainer = build_trainer(args, device_id, model, optim, train_loss)
trainer.train(train_iter_fct, args.train_steps)
|
utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
utility classes
"""
__author__ = "Robert Wen <robert.wen@nyu.edu>, Caicai Chen <caicai.chen@nyu.edu>"
import os
import md5
import json
import Queue
import threading
import time
class Worker(object):
''' Worker thread for concurrent process of tasks from a queue using multiple threads.
This worker is designed to never die, always keeping num_threads threads active.
It can work on any function with arbitrary arguments using the add_task() method.
Example:
worker = Worker(50)
for i in xrange(100):
worker.add_task(func, arg1, arg2) # blocks when queue is full
worker.join() # blocks here
Args:
num_threads: the number of num_threads threads to use from the Queue.
queue_size: the number of elements that can be placed in Queue. If 0 then infinite.
'''
def __init__(self, num_threads=1, queue_size=0, keep_alive=True, quiet=False):
if queue_size != 0 and queue_size < num_threads:
raise Exception('queue_size has to be > num_threads to make sense')
self.num_threads = num_threads
self.queue = Queue.Queue(queue_size)
self.threads = []
self.keep_alive = keep_alive
self.quiet = quiet
self._retain_threads() # Start the threads.
# The following extra thread keeps all the threads alive even if they are crashing.
# This makes it possible to block on a queue size, have threads fail, and still be able to add
# more to the queue because this thread will spawn more new ones to take some stuff off the
# queue.
self.thr = threading.Thread(target=self._keep_alive, args=[self])
self.thr.setDaemon(True)
self.thr.start()
def _retain_threads(self):
''' Make sure there are always self.num_threads threads running. '''
while len(self.threads) < self.num_threads:
t = threading.Thread(target=self._run, args=[self])
t.setDaemon(True)
t.start()
self.threads.append(t)
def _keep_alive(self, *args):
''' This is called by thread self.thr to keep all the self.threads alive forever. '''
while self.keep_alive:
# This join(1) here checks if the thread hit an exception and terminated
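# join(1) always returns None, so 'or t' keeps each still-alive thread in the
# rebuilt list, while threads that have died are dropped by the isAlive() filter.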
self.threads = [t.join(1) or t for t in self.threads if t.isAlive()]
if not self.queue.empty() and self.keep_alive:
self._retain_threads()
def _end_func(self):
''' Dummy function that when added it stops the threads. '''
pass
def _run(self, *args):
''' This is the function the threads have as their targets. '''
while True:
(func, args, kargs) = self.queue.get()
if func == self._end_func: # Check for dummy function and if so end thread.
break
func(*args, **kargs)
def restart(self):
''' If the threads have been killed by a KeyboardInterrupt, then you can call this on the worker
to set keep_alive to True and recreate the extra thread which in turn creates worker threads.
'''
self.keep_alive = True
self._retain_threads()
del self.thr
self.thr = threading.Thread(target=self._keep_alive, args=[self])
self.thr.setDaemon(True)
self.thr.start()
def apply_async(self, func, args): # to match multiprocessing.ThreadPool
self.add_task(func, *args)
def add_task(self, func, *args, **kargs):
''' Add a task to the queue, blocking if the queue is full. This also resets the threads to do
work.
'''
if not self.threads:
self.restart()
self.queue.put((func, args, kargs))
def close(self): # to match multiprocessing.ThreadPool
pass
def join(self, block=True, timeout=None):
''' Wait for the queue to empty.
Args:
block: If True, this blocks the interpreter at that line until the queue is empty,
recreating threads if they die along the way. If False, this just recreates any
stalled threads once and returns so the interpreter can go on. Setting block to
False does not guarantee that threads stay alive, but it is handy when you want to
keep adding tasks and only wait for all of them to finish at the end of your program.
'''
if timeout is not None:
start_time = time.time()
time_join = timeout
else:
time_join = 100
if block:
try:
# Keep the threads going until the queue is emptied.
# This is the stop marker for the threads, so put it in the queue now.
for t in range(self.num_threads):
self.add_task(self._end_func)
while self.threads and (timeout is None or time.time() - start_time < timeout):
if self.queue.empty():
raise Exception()
time.sleep(0.0001)
except KeyboardInterrupt:
# self.threads = [t.join(0.01 / self.num_threads) or t for t in self.threads if t.isAlive()]
self.keep_alive = False
for t in range(self.num_threads):
self.add_task(self._end_func)
except Exception:
# Prevent the keep_alive thread from running
self.keep_alive = False
# Stop all the work threads.
for t in range(self.num_threads):
self.add_task(self._end_func)
# Wait on threads.
self.threads = [t.join(time_join) or t for t in self.threads if t.isAlive()]
|
raspivoice.py
|
import setproctitle #Set process name to something easily killable
from threading import Thread
import cv2
import os
import subprocess #so I can run subprocesses in the background if I want
#import ConfigParser #To read the config file modified by menu.py
from subprocess import call #to call a process in the foreground
import time
class Raspivoice:
def __init__(self, cam, cfg):
self.cam = cam #Camera object from WebcamVideoStream class
self.Config = cfg
self.raspiframe = "/dev/shm/raspi_frame" #Raspivoice takes this as a signal that a frame is available
self.raspiimg = "/dev/shm/opencv.jpg" # The image
self.raspiplayed = "/dev/shm/raspi_played" #Raspivoice creates this after playback of a frame finishes
self.running = False
def start(self):
if (os.path.exists(self.raspiplayed)):
os.remove(self.raspiplayed)
cmdList = ["sudo","/home/pi/raspivoice/Release/./raspivoice",self.Config.ConfigRaspivoiceCamera,self.Config.ConfigRaspivoicePlaybackSpeed,self.Config.ConfigBlinders,self.Config.ConfigZoom, self.Config.ConfigRaspivoiceContrast]
if self.Config.ConfigFovealmapping == "--foveal_mapping":
cmdList.append("--foveal_mapping")
subprocess.Popen(cmdList) #Launch using config settings plus a few obligate command line flags for spoken menu and rotary encoder input
if (os.path.exists(self.raspiframe)):
os.remove(self.raspiframe)
if (os.path.exists(self.raspiimg)):
os.remove(self.raspiimg)
self.running = True
t = Thread(target=self.worker, args=())
t.start()
def worker(self):
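# Frame handshake (inferred from the flag files set up in __init__): whenever the
# raspi_frame flag is absent, grab a frame, write it to /dev/shm/opencv.jpg and
# recreate the flag so raspivoice knows a new image is ready to play back.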
while True:
if not self.running:
return
if (not os.path.exists(self.raspiframe)):
# We need to change the resolution if Foveal mapping is enabled
if self.Config.ConfigFovealmapping == "--foveal_mapping":
res = cv2.resize(self.cam.read(), (320, 240), interpolation =cv2.INTER_AREA)
else:
res = cv2.resize(self.cam.read(), (176, 64), interpolation =cv2.INTER_AREA)
cv2.imwrite(self.raspiimg, res)
os.mknod(self.raspiframe)
def stop(self):
self.running = False
call (["sudo","killall","raspivoice"]) # Kills raspivoice if its running
def restart(self):
if not self.running:
return
self.stop()
time.sleep(1)
self.start()
|
test_tracker.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from queue import Queue
import threading
import pytest
import torch
from torch import nn
from fairscale.nn.pipe.checkpoint import enable_checkpointing, enable_recomputing
from fairscale.nn.pipe.microbatch import Batch
from fairscale.nn.pipe.skip import pop, skippable, stash
from fairscale.nn.pipe.skip.layout import SkipLayout
from fairscale.nn.pipe.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
def test_default_skip_tracker():
q = Queue()
def f():
q.put(current_skip_tracker())
t = threading.Thread(target=f)
t.start()
t.join()
skip_tracker = q.get()
assert type(skip_tracker) is SkipTracker
assert type(skip_tracker) is not SkipTrackerThroughPotals
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_default_skip_tracker_by_data_parallel():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
model = nn.Sequential(Stash(), Pop())
model = nn.DataParallel(model, device_ids=[0, 0], output_device=0)
input = torch.rand(10, device=0)
output = model(input)
assert torch.allclose(output, input)
def test_reuse_portal():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout, 0)
batch = Batch(torch.tensor([1.0]), 0)
a = torch.tensor([2.0])
b = torch.tensor([2.0])
skip_tracker.save(batch, None, "test", a)
portal = skip_tracker.portals[(None, "test")]
skip_tracker.save(batch, None, "test", b)
assert portal is skip_tracker.portals[(None, "test")]
def test_no_copy_no_portal():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "copy"): (0, 1), (None, "not_copy"): (0, 0)})
skip_tracker = SkipTrackerThroughPotals(skip_layout, 0)
batch = Batch(torch.tensor([1.0]), 0)
a = torch.tensor([2.0])
b = torch.tensor([2.0])
skip_tracker.save(batch, None, "copy", a)
skip_tracker.save(batch, None, "not_copy", b)
assert (None, "copy") in skip_tracker.portals
assert (None, "copy") not in skip_tracker.tensors
assert (None, "not_copy") in skip_tracker.tensors
assert (None, "not_copy") not in skip_tracker.portals
def test_tensor_life_without_checkpointing():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout, 0)
batch = Batch(torch.tensor([1.0]), 0)
tensor = torch.tensor([2.0])
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 1
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 0
def test_tensor_life_with_checkpointing():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout, 0)
batch = Batch(torch.tensor([1.0]), 0)
tensor = torch.tensor([2.0])
with enable_checkpointing():
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 2
with enable_checkpointing():
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 1
with enable_recomputing():
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 0
with enable_recomputing():
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 0
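# Note (an interpretation, not from the upstream source): tensor_life appears to count
# the remaining loads of the portal's tensor -- one for a plain forward pass, two when
# checkpointing is enabled (the forward load plus the load during recomputation) --
# which matches the countdowns asserted in the two tests above.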
|
test_kvstore.py
|
# -*- coding: utf-8 -*-
import threading
import unittest
from sorl.thumbnail.kvstores.cached_db_kvstore import KVStore
class KVStoreTestCase(unittest.TestCase):
@unittest.skipIf(threading is None, 'Test requires threading')
def test_cache_backend(self):
kv = KVStore()
cache_backends = []
def thread_cache_backend():
cache_backends.append(kv.cache)
for x in range(2):
t = threading.Thread(target=thread_cache_backend)
t.start()
t.join()
# Cache backend for each thread needs to be unique
self.assertNotEqual(cache_backends[0], cache_backends[1])
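# Illustrative sketch (not part of sorl.thumbnail): the per-thread behavior asserted
# above can be provided with threading.local, e.g.
#
#   class ThreadLocalCache:
#       _local = threading.local()
#
#       @property
#       def cache(self):
#           if not hasattr(self._local, "backend"):
#               self._local.backend = object()  # stand-in for a real cache connection
#           return self._local.backend
#
# Each thread lazily creates its own backend instance, so two threads never share one.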
|
tests.py
|
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import sys
import tempfile
import threading
import time
import unittest
from pathlib import Path
from unittest import mock, skipIf
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS,
CacheHandler,
CacheKeyWarning,
InvalidCacheKey,
cache,
caches,
)
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.cache.backends.redis import RedisCacheClient
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.db.backends.utils import CursorWrapper
from django.http import (
HttpRequest,
HttpResponse,
HttpResponseNotModified,
StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware,
FetchFromCacheMiddleware,
UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory,
SimpleTestCase,
TestCase,
TransactionTestCase,
override_settings,
)
from django.test.signals import setting_changed
from django.test.utils import CaptureQueriesContext
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key,
learn_cache_key,
patch_cache_control,
patch_vary_headers,
)
from django.views.decorators.cache import cache_control, cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
def empty_response(request):
return HttpResponse()
KEY_ERRORS_WITH_MEMCACHED_MSG = (
"Cache key contains characters that will cause errors if used with memcached: %r"
)
@override_settings(
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
}
}
)
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
self.assertIs(cache.add("addkey1", "value"), True)
self.assertIs(cache.add("addkey1", "newvalue"), True)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Nonexistent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set_many({"a": "a", "b": "b", "c": "c", "d": "d"})
self.assertEqual(cache.get_many(["a", "c", "d"]), {})
self.assertEqual(cache.get_many(["a", "b", "e"]), {})
def test_get_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ":1:key with spaces"
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.get_many(["key with spaces"])
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertIsNone(cache.get("key1"))
self.assertIs(cache.delete("key1"), False)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertIs(cache.has_key("hello1"), False)
self.assertIs(cache.has_key("goodbye1"), False)
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set("answer", 42)
with self.assertRaises(ValueError):
cache.incr("answer")
with self.assertRaises(ValueError):
cache.incr("does_not_exist")
with self.assertRaises(ValueError):
cache.incr("does_not_exist", -1)
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set("answer", 42)
with self.assertRaises(ValueError):
cache.decr("answer")
with self.assertRaises(ValueError):
cache.decr("does_not_exist")
with self.assertRaises(ValueError):
cache.decr("does_not_exist", -1)
def test_touch(self):
"""Dummy cache can't do touch()."""
self.assertIs(cache.touch("whatever"), False)
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
tests = {
"string": "this is a string",
"int": 42,
"bool": True,
"list": [1, 2, 3, 4],
"tuple": (1, 2, 3, 4),
"dict": {"A": 1, "B": 2},
"function": f,
"class": C,
}
for key, value in tests.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set("expire1", "very quickly", 1)
cache.set("expire2", "very quickly", 1)
cache.set("expire3", "very quickly", 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
self.assertIs(cache.add("expire2", "newvalue"), True)
self.assertIsNone(cache.get("expire2"))
self.assertIs(cache.has_key("expire3"), False)
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
"ascii": "ascii_value",
"unicode_ascii": "Iñtërnâtiônàlizætiøn1",
"Iñtërnâtiônàlizætiøn": "Iñtërnâtiônàlizætiøn2",
"ascii2": {"x": 1},
}
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
self.assertEqual(cache.set_many({"a": 1, "b": 2}), [])
self.assertEqual(cache.set_many({"a": 1, "b": 2}, timeout=2, version="1"), [])
def test_set_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ":1:key with spaces"
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.set_many({"key with spaces": "foo"})
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(["a", "b"])
def test_delete_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ":1:key with spaces"
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.delete_many(["key with spaces"])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set("answer", 42)
with self.assertRaises(ValueError):
cache.incr_version("answer")
with self.assertRaises(ValueError):
cache.incr_version("does_not_exist")
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set("answer", 42)
with self.assertRaises(ValueError):
cache.decr_version("answer")
with self.assertRaises(ValueError):
cache.decr_version("does_not_exist")
def test_get_or_set(self):
self.assertEqual(cache.get_or_set("mykey", "default"), "default")
self.assertIsNone(cache.get_or_set("mykey", None))
def test_get_or_set_callable(self):
def my_callable():
return "default"
self.assertEqual(cache.get_or_set("mykey", my_callable), "default")
self.assertEqual(cache.get_or_set("mykey", my_callable()), "default")
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return "CUSTOM-" + "-".join([key_prefix, str(version), key])
_caches_setting_base = {
"default": {},
"prefix": {"KEY_PREFIX": "cacheprefix{}".format(os.getpid())},
"v2": {"VERSION": 2},
"custom_key": {"KEY_FUNCTION": custom_key_func},
"custom_key2": {"KEY_FUNCTION": "cache.tests.custom_key_func"},
"cull": {"OPTIONS": {"MAX_ENTRIES": 30}},
"zero_cull": {"OPTIONS": {"CULL_FREQUENCY": 0, "MAX_ENTRIES": 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
# `params` are test-specific overrides and `_caches_setting_base` is the
# base config for the tests (see the illustrative sketch after this function).
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
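# Illustrative sketch (not part of Django's test suite): with a hypothetical locmem
# base, the search order above gives, for the "v2" alias,
#
#   caches_setting_for_tests(
#       base={"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
#       TIMEOUT=30,
#   )["v2"]
#   == {
#       "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
#       "VERSION": 2,
#       "TIMEOUT": 30,
#   }
#
# i.e. the per-alias entry supplies VERSION and the keyword params add TIMEOUT on top of
# the shared base backend.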
class BaseCacheTests:
# A common set of tests to apply to all cache backends
factory = RequestFactory()
# Some clients raise custom exceptions when .incr() or .decr() are called
# with a non-integer value.
incr_decr_type_error = TypeError
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_default_used_when_none_is_set(self):
"""If None is cached, get() returns it instead of the default."""
cache.set("key_default_none", None)
self.assertIsNone(cache.get("key_default_none", default="default"))
def test_add(self):
# A key can be added to a cache
self.assertIs(cache.add("addkey1", "value"), True)
self.assertIs(cache.add("addkey1", "newvalue"), False)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for cache key conflicts between caches that share a backend
cache.set("somekey", "value")
# should not be set in the prefixed cache
self.assertIs(caches["prefix"].has_key("somekey"), False)
caches["prefix"].set("somekey", "value2")
self.assertEqual(cache.get("somekey"), "value")
self.assertEqual(caches["prefix"].get("somekey"), "value2")
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set_many({"a": "a", "b": "b", "c": "c", "d": "d"})
self.assertEqual(
cache.get_many(["a", "c", "d"]), {"a": "a", "c": "c", "d": "d"}
)
self.assertEqual(cache.get_many(["a", "b", "e"]), {"a": "a", "b": "b"})
self.assertEqual(cache.get_many(iter(["a", "b", "e"])), {"a": "a", "b": "b"})
cache.set_many({"x": None, "y": 1})
self.assertEqual(cache.get_many(["x", "y"]), {"x": None, "y": 1})
def test_delete(self):
# Cache keys can be deleted
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertIs(cache.delete("key1"), True)
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_delete_nonexistent(self):
self.assertIs(cache.delete("nonexistent_key"), False)
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertIs(cache.has_key("hello1"), True)
self.assertIs(cache.has_key("goodbye1"), False)
cache.set("no_expiry", "here", None)
self.assertIs(cache.has_key("no_expiry"), True)
cache.set("null", None)
self.assertIs(cache.has_key("null"), True)
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
cache.set("null", None)
self.assertIn("null", cache)
def test_incr(self):
# Cache values can be incremented
cache.set("answer", 41)
self.assertEqual(cache.incr("answer"), 42)
self.assertEqual(cache.get("answer"), 42)
self.assertEqual(cache.incr("answer", 10), 52)
self.assertEqual(cache.get("answer"), 52)
self.assertEqual(cache.incr("answer", -10), 42)
with self.assertRaises(ValueError):
cache.incr("does_not_exist")
with self.assertRaises(ValueError):
cache.incr("does_not_exist", -1)
cache.set("null", None)
with self.assertRaises(self.incr_decr_type_error):
cache.incr("null")
def test_decr(self):
# Cache values can be decremented
cache.set("answer", 43)
self.assertEqual(cache.decr("answer"), 42)
self.assertEqual(cache.get("answer"), 42)
self.assertEqual(cache.decr("answer", 10), 32)
self.assertEqual(cache.get("answer"), 32)
self.assertEqual(cache.decr("answer", -10), 42)
with self.assertRaises(ValueError):
cache.decr("does_not_exist")
with self.assertRaises(ValueError):
cache.incr("does_not_exist", -1)
cache.set("null", None)
with self.assertRaises(self.incr_decr_type_error):
cache.decr("null")
def test_close(self):
self.assertTrue(hasattr(cache, "close"))
cache.close()
def test_data_types(self):
# Many different data types can be cached
tests = {
"string": "this is a string",
"int": 42,
"bool": True,
"list": [1, 2, 3, 4],
"tuple": (1, 2, 3, 4),
"dict": {"A": 1, "B": 2},
"function": f,
"class": C,
}
for key, value in tests.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set("question", my_poll)
cached_poll = cache.get("question")
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.defer("question")
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set("deferred_queryset", defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.defer("question")
self.assertEqual(defer_qs.count(), 1)
cache.set("deferred_queryset", defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get("deferred_queryset")
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set("expire1", "very quickly", 1)
cache.set("expire2", "very quickly", 1)
cache.set("expire3", "very quickly", 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
self.assertIs(cache.add("expire2", "newvalue"), True)
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertIs(cache.has_key("expire3"), False)
def test_touch(self):
# cache.touch() updates the timeout.
cache.set("expire1", "very quickly", timeout=1)
self.assertIs(cache.touch("expire1", timeout=4), True)
time.sleep(2)
self.assertIs(cache.has_key("expire1"), True)
time.sleep(3)
self.assertIs(cache.has_key("expire1"), False)
# cache.touch() works without the timeout argument.
cache.set("expire1", "very quickly", timeout=1)
self.assertIs(cache.touch("expire1"), True)
time.sleep(2)
self.assertIs(cache.has_key("expire1"), True)
self.assertIs(cache.touch("nonexistent"), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
"ascii": "ascii_value",
"unicode_ascii": "Iñtërnâtiônàlizætiøn1",
"Iñtërnâtiônàlizætiøn": "Iñtërnâtiônàlizætiøn2",
"ascii2": {"x": 1},
}
# Test `set`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertIs(cache.delete(key), True)
self.assertIs(cache.add(key, value), True)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
self.assertIs(cache.delete(key), True)
cache.set_many(stuff)
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = "value_to_be_compressed"
compressed_value = compress(value.encode())
# Test set
cache.set("binary1", compressed_value)
compressed_result = cache.get("binary1")
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
self.assertIs(cache.add("binary1-add", compressed_value), True)
compressed_result = cache.get("binary1-add")
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({"binary1-set_many": compressed_value})
compressed_result = cache.get("binary1-set_many")
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_returns_empty_list_on_success(self):
"""set_many() returns an empty list when all keys are inserted."""
failing_keys = cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(failing_keys, [])
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set_many({"key1": "spam", "key2": "eggs", "key3": "ham"})
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set_many({"key1": "spam", "key2": "eggs"})
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399). A sketch of that conversion follows this test.
"""
cache.set("key1", "eggs", 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get("key1"), "eggs")
self.assertIs(cache.add("key2", "ham", 60 * 60 * 24 * 30 + 1), True)
self.assertEqual(cache.get("key2"), "ham")
cache.set_many(
{"key3": "sausage", "key4": "lobster bisque"}, 60 * 60 * 24 * 30 + 1
)
self.assertEqual(cache.get("key3"), "sausage")
self.assertEqual(cache.get("key4"), "lobster bisque")
def test_forever_timeout(self):
"""
Passing None for timeout results in a value that is cached forever.
"""
cache.set("key1", "eggs", None)
self.assertEqual(cache.get("key1"), "eggs")
self.assertIs(cache.add("key2", "ham", None), True)
self.assertEqual(cache.get("key2"), "ham")
self.assertIs(cache.add("key1", "new eggs", None), False)
self.assertEqual(cache.get("key1"), "eggs")
cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, None)
self.assertEqual(cache.get("key3"), "sausage")
self.assertEqual(cache.get("key4"), "lobster bisque")
cache.set("key5", "belgian fries", timeout=1)
self.assertIs(cache.touch("key5", timeout=None), True)
time.sleep(2)
self.assertEqual(cache.get("key5"), "belgian fries")
def test_zero_timeout(self):
"""
Passing zero for timeout results in a value that is not cached.
"""
cache.set("key1", "eggs", 0)
self.assertIsNone(cache.get("key1"))
self.assertIs(cache.add("key2", "ham", 0), True)
self.assertIsNone(cache.get("key2"))
cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, 0)
self.assertIsNone(cache.get("key3"))
self.assertIsNone(cache.get("key4"))
cache.set("key5", "belgian fries", timeout=5)
self.assertIs(cache.touch("key5", timeout=0), True)
self.assertIsNone(cache.get("key5"))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache_name, initial_count, final_count):
try:
cull_cache = caches[cull_cache_name]
except InvalidCacheBackendError:
self.skipTest("Culling isn't implemented.")
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set("cull%d" % i, "value", 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key("cull%d" % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test("cull", 50, 29)
def test_zero_cull(self):
self._perform_cull_test("zero_cull", 50, 19)
def test_cull_delete_when_store_empty(self):
try:
cull_cache = caches["cull"]
except InvalidCacheBackendError:
self.skipTest("Culling isn't implemented.")
old_max_entries = cull_cache._max_entries
# Force _cull to delete on first cached record.
cull_cache._max_entries = -1
try:
cull_cache.set("force_cull_delete", "value", 1000)
self.assertIs(cull_cache.has_key("force_cull_delete"), True)
finally:
cull_cache._max_entries = old_max_entries
def _perform_invalid_key_test(self, key, expected_warning, key_func=None):
"""
All the builtin backends should warn (except memcached, which should
error) on keys that would be refused by memcached. This encourages
portable caching code without making it too difficult to use production
backends with more liberal key rules. Refs #6447.
"""
# Mimic a custom ``make_key`` method being defined, since the default one
# never triggers the warnings below.
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = key_func or func
tests = [
("add", [key, 1]),
("get", [key]),
("set", [key, 1]),
("incr", [key]),
("decr", [key]),
("touch", [key]),
("delete", [key]),
("get_many", [[key, "b"]]),
("set_many", [{key: 1, "b": 2}]),
("delete_many", [[key, "b"]]),
]
try:
for operation, args in tests:
with self.subTest(operation=operation):
with self.assertWarns(CacheKeyWarning) as cm:
getattr(cache, operation)(*args)
self.assertEqual(str(cm.warning), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = "key with spaces and 清"
self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ("a" * 250) + "清"
expected_warning = (
"Cache key will cause errors if used with memcached: "
"%r (longer than %s)" % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_invalid_with_version_key_length(self):
# Custom make_key() that adds a version to the key and exceeds the
# limit.
def key_func(key, *args):
return key + ":1"
key = "a" * 249
expected_warning = (
"Cache key will cause errors if used with memcached: "
"%r (longer than %s)" % (key_func(key), 250)
)
self._perform_invalid_key_test(key, expected_warning, key_func=key_func)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set("answer1", 42)
self.assertEqual(cache.get("answer1"), 42)
self.assertEqual(cache.get("answer1", version=1), 42)
self.assertIsNone(cache.get("answer1", version=2))
self.assertIsNone(caches["v2"].get("answer1"))
self.assertEqual(caches["v2"].get("answer1", version=1), 42)
self.assertIsNone(caches["v2"].get("answer1", version=2))
# set, default version = 1, but manually override version = 2
cache.set("answer2", 42, version=2)
self.assertIsNone(cache.get("answer2"))
self.assertIsNone(cache.get("answer2", version=1))
self.assertEqual(cache.get("answer2", version=2), 42)
self.assertEqual(caches["v2"].get("answer2"), 42)
self.assertIsNone(caches["v2"].get("answer2", version=1))
self.assertEqual(caches["v2"].get("answer2", version=2), 42)
# v2 set, using default version = 2
caches["v2"].set("answer3", 42)
self.assertIsNone(cache.get("answer3"))
self.assertIsNone(cache.get("answer3", version=1))
self.assertEqual(cache.get("answer3", version=2), 42)
self.assertEqual(caches["v2"].get("answer3"), 42)
self.assertIsNone(caches["v2"].get("answer3", version=1))
self.assertEqual(caches["v2"].get("answer3", version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches["v2"].set("answer4", 42, version=1)
self.assertEqual(cache.get("answer4"), 42)
self.assertEqual(cache.get("answer4", version=1), 42)
self.assertIsNone(cache.get("answer4", version=2))
self.assertIsNone(caches["v2"].get("answer4"))
self.assertEqual(caches["v2"].get("answer4", version=1), 42)
self.assertIsNone(caches["v2"].get("answer4", version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
self.assertIs(cache.add("answer1", 42, version=2), True)
self.assertIsNone(cache.get("answer1", version=1))
self.assertEqual(cache.get("answer1", version=2), 42)
self.assertIs(cache.add("answer1", 37, version=2), False)
self.assertIsNone(cache.get("answer1", version=1))
self.assertEqual(cache.get("answer1", version=2), 42)
self.assertIs(cache.add("answer1", 37, version=1), True)
self.assertEqual(cache.get("answer1", version=1), 37)
self.assertEqual(cache.get("answer1", version=2), 42)
# v2 add, using default version = 2
self.assertIs(caches["v2"].add("answer2", 42), True)
self.assertIsNone(cache.get("answer2", version=1))
self.assertEqual(cache.get("answer2", version=2), 42)
self.assertIs(caches["v2"].add("answer2", 37), False)
self.assertIsNone(cache.get("answer2", version=1))
self.assertEqual(cache.get("answer2", version=2), 42)
self.assertIs(caches["v2"].add("answer2", 37, version=1), True)
self.assertEqual(cache.get("answer2", version=1), 37)
self.assertEqual(cache.get("answer2", version=2), 42)
# v2 add, default version = 2, but manually override version = 1
self.assertIs(caches["v2"].add("answer3", 42, version=1), True)
self.assertEqual(cache.get("answer3", version=1), 42)
self.assertIsNone(cache.get("answer3", version=2))
self.assertIs(caches["v2"].add("answer3", 37, version=1), False)
self.assertEqual(cache.get("answer3", version=1), 42)
self.assertIsNone(cache.get("answer3", version=2))
self.assertIs(caches["v2"].add("answer3", 37), True)
self.assertEqual(cache.get("answer3", version=1), 42)
self.assertEqual(cache.get("answer3", version=2), 37)
def test_cache_versioning_has_key(self):
cache.set("answer1", 42)
# has_key
self.assertIs(cache.has_key("answer1"), True)
self.assertIs(cache.has_key("answer1", version=1), True)
self.assertIs(cache.has_key("answer1", version=2), False)
self.assertIs(caches["v2"].has_key("answer1"), False)
self.assertIs(caches["v2"].has_key("answer1", version=1), True)
self.assertIs(caches["v2"].has_key("answer1", version=2), False)
def test_cache_versioning_delete(self):
cache.set("answer1", 37, version=1)
cache.set("answer1", 42, version=2)
self.assertIs(cache.delete("answer1"), True)
self.assertIsNone(cache.get("answer1", version=1))
self.assertEqual(cache.get("answer1", version=2), 42)
cache.set("answer2", 37, version=1)
cache.set("answer2", 42, version=2)
self.assertIs(cache.delete("answer2", version=2), True)
self.assertEqual(cache.get("answer2", version=1), 37)
self.assertIsNone(cache.get("answer2", version=2))
cache.set("answer3", 37, version=1)
cache.set("answer3", 42, version=2)
self.assertIs(caches["v2"].delete("answer3"), True)
self.assertEqual(cache.get("answer3", version=1), 37)
self.assertIsNone(cache.get("answer3", version=2))
cache.set("answer4", 37, version=1)
cache.set("answer4", 42, version=2)
self.assertIs(caches["v2"].delete("answer4", version=1), True)
self.assertIsNone(cache.get("answer4", version=1))
self.assertEqual(cache.get("answer4", version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set("answer1", 37, version=1)
cache.set("answer1", 42, version=2)
self.assertEqual(cache.incr("answer1"), 38)
self.assertEqual(cache.get("answer1", version=1), 38)
self.assertEqual(cache.get("answer1", version=2), 42)
self.assertEqual(cache.decr("answer1"), 37)
self.assertEqual(cache.get("answer1", version=1), 37)
self.assertEqual(cache.get("answer1", version=2), 42)
cache.set("answer2", 37, version=1)
cache.set("answer2", 42, version=2)
self.assertEqual(cache.incr("answer2", version=2), 43)
self.assertEqual(cache.get("answer2", version=1), 37)
self.assertEqual(cache.get("answer2", version=2), 43)
self.assertEqual(cache.decr("answer2", version=2), 42)
self.assertEqual(cache.get("answer2", version=1), 37)
self.assertEqual(cache.get("answer2", version=2), 42)
cache.set("answer3", 37, version=1)
cache.set("answer3", 42, version=2)
self.assertEqual(caches["v2"].incr("answer3"), 43)
self.assertEqual(cache.get("answer3", version=1), 37)
self.assertEqual(cache.get("answer3", version=2), 43)
self.assertEqual(caches["v2"].decr("answer3"), 42)
self.assertEqual(cache.get("answer3", version=1), 37)
self.assertEqual(cache.get("answer3", version=2), 42)
cache.set("answer4", 37, version=1)
cache.set("answer4", 42, version=2)
self.assertEqual(caches["v2"].incr("answer4", version=1), 38)
self.assertEqual(cache.get("answer4", version=1), 38)
self.assertEqual(cache.get("answer4", version=2), 42)
self.assertEqual(caches["v2"].decr("answer4", version=1), 37)
self.assertEqual(cache.get("answer4", version=1), 37)
self.assertEqual(cache.get("answer4", version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({"ford1": 37, "arthur1": 42})
self.assertEqual(
cache.get_many(["ford1", "arthur1"]), {"ford1": 37, "arthur1": 42}
)
self.assertEqual(
cache.get_many(["ford1", "arthur1"], version=1),
{"ford1": 37, "arthur1": 42},
)
self.assertEqual(cache.get_many(["ford1", "arthur1"], version=2), {})
self.assertEqual(caches["v2"].get_many(["ford1", "arthur1"]), {})
self.assertEqual(
caches["v2"].get_many(["ford1", "arthur1"], version=1),
{"ford1": 37, "arthur1": 42},
)
self.assertEqual(caches["v2"].get_many(["ford1", "arthur1"], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({"ford2": 37, "arthur2": 42}, version=2)
self.assertEqual(cache.get_many(["ford2", "arthur2"]), {})
self.assertEqual(cache.get_many(["ford2", "arthur2"], version=1), {})
self.assertEqual(
cache.get_many(["ford2", "arthur2"], version=2),
{"ford2": 37, "arthur2": 42},
)
self.assertEqual(
caches["v2"].get_many(["ford2", "arthur2"]), {"ford2": 37, "arthur2": 42}
)
self.assertEqual(caches["v2"].get_many(["ford2", "arthur2"], version=1), {})
self.assertEqual(
caches["v2"].get_many(["ford2", "arthur2"], version=2),
{"ford2": 37, "arthur2": 42},
)
# v2 set, using default version = 2
caches["v2"].set_many({"ford3": 37, "arthur3": 42})
self.assertEqual(cache.get_many(["ford3", "arthur3"]), {})
self.assertEqual(cache.get_many(["ford3", "arthur3"], version=1), {})
self.assertEqual(
cache.get_many(["ford3", "arthur3"], version=2),
{"ford3": 37, "arthur3": 42},
)
self.assertEqual(
caches["v2"].get_many(["ford3", "arthur3"]), {"ford3": 37, "arthur3": 42}
)
self.assertEqual(caches["v2"].get_many(["ford3", "arthur3"], version=1), {})
self.assertEqual(
caches["v2"].get_many(["ford3", "arthur3"], version=2),
{"ford3": 37, "arthur3": 42},
)
# v2 set, default version = 2, but manually override version = 1
caches["v2"].set_many({"ford4": 37, "arthur4": 42}, version=1)
self.assertEqual(
cache.get_many(["ford4", "arthur4"]), {"ford4": 37, "arthur4": 42}
)
self.assertEqual(
cache.get_many(["ford4", "arthur4"], version=1),
{"ford4": 37, "arthur4": 42},
)
self.assertEqual(cache.get_many(["ford4", "arthur4"], version=2), {})
self.assertEqual(caches["v2"].get_many(["ford4", "arthur4"]), {})
self.assertEqual(
caches["v2"].get_many(["ford4", "arthur4"], version=1),
{"ford4": 37, "arthur4": 42},
)
self.assertEqual(caches["v2"].get_many(["ford4", "arthur4"], version=2), {})
def test_incr_version(self):
cache.set("answer", 42, version=2)
self.assertIsNone(cache.get("answer"))
self.assertIsNone(cache.get("answer", version=1))
self.assertEqual(cache.get("answer", version=2), 42)
self.assertIsNone(cache.get("answer", version=3))
self.assertEqual(cache.incr_version("answer", version=2), 3)
self.assertIsNone(cache.get("answer"))
self.assertIsNone(cache.get("answer", version=1))
self.assertIsNone(cache.get("answer", version=2))
self.assertEqual(cache.get("answer", version=3), 42)
caches["v2"].set("answer2", 42)
self.assertEqual(caches["v2"].get("answer2"), 42)
self.assertIsNone(caches["v2"].get("answer2", version=1))
self.assertEqual(caches["v2"].get("answer2", version=2), 42)
self.assertIsNone(caches["v2"].get("answer2", version=3))
self.assertEqual(caches["v2"].incr_version("answer2"), 3)
self.assertIsNone(caches["v2"].get("answer2"))
self.assertIsNone(caches["v2"].get("answer2", version=1))
self.assertIsNone(caches["v2"].get("answer2", version=2))
self.assertEqual(caches["v2"].get("answer2", version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version("does_not_exist")
cache.set("null", None)
self.assertEqual(cache.incr_version("null"), 2)
def test_decr_version(self):
cache.set("answer", 42, version=2)
self.assertIsNone(cache.get("answer"))
self.assertIsNone(cache.get("answer", version=1))
self.assertEqual(cache.get("answer", version=2), 42)
self.assertEqual(cache.decr_version("answer", version=2), 1)
self.assertEqual(cache.get("answer"), 42)
self.assertEqual(cache.get("answer", version=1), 42)
self.assertIsNone(cache.get("answer", version=2))
caches["v2"].set("answer2", 42)
self.assertEqual(caches["v2"].get("answer2"), 42)
self.assertIsNone(caches["v2"].get("answer2", version=1))
self.assertEqual(caches["v2"].get("answer2", version=2), 42)
self.assertEqual(caches["v2"].decr_version("answer2"), 1)
self.assertIsNone(caches["v2"].get("answer2"))
self.assertEqual(caches["v2"].get("answer2", version=1), 42)
self.assertIsNone(caches["v2"].get("answer2", version=2))
with self.assertRaises(ValueError):
cache.decr_version("does_not_exist", version=2)
cache.set("null", None, version=2)
self.assertEqual(cache.decr_version("null", version=2), 1)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set("answer1", 42)
self.assertEqual(cache.get("answer1"), 42)
self.assertIsNone(caches["custom_key"].get("answer1"))
self.assertIsNone(caches["custom_key2"].get("answer1"))
caches["custom_key"].set("answer2", 42)
self.assertIsNone(cache.get("answer2"))
self.assertEqual(caches["custom_key"].get("answer2"), 42)
self.assertEqual(caches["custom_key2"].get("answer2"), 42)
@override_settings(CACHE_MIDDLEWARE_ALIAS=DEFAULT_CACHE_ALIAS)
def test_cache_write_unpicklable_object(self):
fetch_middleware = FetchFromCacheMiddleware(empty_response)
request = self.factory.get("/cache/test")
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertIsNone(get_cache_data)
content = "Testing cookie serialization."
def get_response(req):
response = HttpResponse(content)
response.set_cookie("foo", "bar")
return response
update_middleware = UpdateCacheMiddleware(get_response)
response = update_middleware(request)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
UpdateCacheMiddleware(lambda req: get_cache_data)(request)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add("unpicklable", Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set("unpicklable", Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get("projector"))
self.assertEqual(cache.get_or_set("projector", 42), 42)
self.assertEqual(cache.get("projector"), 42)
self.assertIsNone(cache.get_or_set("null", None))
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get("null", "default"))
def test_get_or_set_callable(self):
def my_callable():
return "value"
self.assertEqual(cache.get_or_set("mykey", my_callable), "value")
self.assertEqual(cache.get_or_set("mykey", my_callable()), "value")
self.assertIsNone(cache.get_or_set("null", lambda: None))
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get("null", "default"))
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
self.assertEqual(cache.get_or_set("brian", 1979, version=2), 1979)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set("brian")
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set("brian", version=1)
self.assertIsNone(cache.get("brian", version=1))
self.assertEqual(cache.get_or_set("brian", 42, version=1), 42)
self.assertEqual(cache.get_or_set("brian", 1979, version=2), 1979)
self.assertIsNone(cache.get("brian", version=3))
def test_get_or_set_racing(self):
with mock.patch(
"%s.%s" % (settings.CACHES["default"]["BACKEND"], "add")
) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set("key", "default"), "default")
@override_settings(
CACHES=caches_setting_for_tests(
BACKEND="django.core.cache.backends.db.DatabaseCache",
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION="test cache table",
)
)
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ["cache"]
def setUp(self):
# The super call needs to happen first for the settings override.
super().setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super().tearDown()
self.drop_table()
def create_table(self):
management.call_command("createcachetable", verbosity=0)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name("test cache table")
cursor.execute("DROP TABLE %s" % table_name)
def test_get_many_num_queries(self):
cache.set_many({"a": 1, "b": 2})
cache.set("expired", "expired", 0.01)
with self.assertNumQueries(1):
self.assertEqual(cache.get_many(["a", "b"]), {"a": 1, "b": 2})
time.sleep(0.02)
with self.assertNumQueries(2):
self.assertEqual(cache.get_many(["a", "b", "expired"]), {"a": 1, "b": 2})
def test_delete_many_num_queries(self):
cache.set_many({"a": 1, "b": 2, "c": 3})
with self.assertNumQueries(1):
cache.delete_many(["a", "b", "c"])
def test_cull_queries(self):
old_max_entries = cache._max_entries
# Force _cull to delete on first cached record.
cache._max_entries = -1
with CaptureQueriesContext(connection) as captured_queries:
try:
cache.set("force_cull", "value", 1000)
finally:
cache._max_entries = old_max_entries
num_count_queries = sum("COUNT" in query["sql"] for query in captured_queries)
self.assertEqual(num_count_queries, 1)
# Column names are quoted.
for query in captured_queries:
sql = query["sql"]
if "expires" in sql:
self.assertIn(connection.ops.quote_name("expires"), sql)
if "cache_key" in sql:
self.assertIn(connection.ops.quote_name("cache_key"), sql)
def test_delete_cursor_rowcount(self):
"""
The rowcount attribute should not be checked on a closed cursor.
"""
class MockedCursorWrapper(CursorWrapper):
is_closed = False
def close(self):
self.cursor.close()
self.is_closed = True
@property
def rowcount(self):
if self.is_closed:
raise Exception("Cursor is closed.")
return self.cursor.rowcount
cache.set_many({"a": 1, "b": 2})
with mock.patch("django.db.backends.utils.CursorWrapper", MockedCursorWrapper):
self.assertIs(cache.delete("a"), True)
def test_zero_cull(self):
self._perform_cull_test("zero_cull", 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command("createcachetable", stdout=out)
self.assertEqual(
out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES),
)
@override_settings(
CACHES=caches_setting_for_tests(
BACKEND="django.core.cache.backends.db.DatabaseCache",
# Use another table name to avoid the 'table already exists' message.
LOCATION="createcachetable_dry_run_mode",
)
)
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command("createcachetable", dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
"createcachetable",
"test cache table",
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
def test_has_key_query_columns_quoted(self):
with CaptureQueriesContext(connection) as captured_queries:
cache.has_key("key")
self.assertEqual(len(captured_queries), 1)
sql = captured_queries[0]["sql"]
# Column names are quoted.
self.assertIn(connection.ops.quote_name("expires"), sql)
self.assertIn(connection.ops.quote_name("cache_key"), sql)
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter:
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == "django_cache":
return "other"
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == "django_cache":
return "other"
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == "django_cache":
return db == "other"
return None
@override_settings(
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.db.DatabaseCache",
"LOCATION": "my_cache_table",
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
databases = {"default", "other"}
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using="default"):
management.call_command("createcachetable", database="default", verbosity=0)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections["other"].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using="other"):
management.call_command("createcachetable", database="other", verbosity=0)
class PicklingSideEffect:
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
self.locked = self.cache._lock.locked()
return {}
limit_locmem_entries = override_settings(
CACHES=caches_setting_for_tests(
BACKEND="django.core.cache.backends.locmem.LocMemCache",
OPTIONS={"MAX_ENTRIES": 9},
)
)
@override_settings(
CACHES=caches_setting_for_tests(
BACKEND="django.core.cache.backends.locmem.LocMemCache",
)
)
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super().setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches["prefix"]._cache = cache._cache
caches["prefix"]._expire_info = cache._expire_info
caches["v2"]._cache = cache._cache
caches["v2"]._expire_info = cache._expire_info
caches["custom_key"]._cache = cache._cache
caches["custom_key"]._expire_info = cache._expire_info
caches["custom_key2"]._cache = cache._cache
caches["custom_key2"]._expire_info = cache._expire_info
@override_settings(
CACHES={
"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
"other": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "other",
},
}
)
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set("value", 42)
self.assertEqual(caches["default"].get("value"), 42)
self.assertIsNone(caches["other"].get("value"))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set("set", bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
self.assertIs(cache.add("add", bad_obj), True)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = "value"
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
self.assertEqual(cache.incr(key), 2)
self.assertEqual(expire, cache._expire_info[_key])
self.assertEqual(cache.decr(key), 1)
self.assertEqual(expire, cache._expire_info[_key])
@limit_locmem_entries
def test_lru_get(self):
"""get() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
@limit_locmem_entries
def test_lru_set(self):
"""set() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(3, 9):
cache.set(key, key, timeout=None)
cache.set(9, 9, timeout=None)
for key in range(3, 10):
self.assertEqual(cache.get(key), key)
for key in range(3):
self.assertIsNone(cache.get(key))
@limit_locmem_entries
def test_lru_incr(self):
"""incr() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.incr(key), key + 1)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key + 1)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
# memcached and redis backends aren't guaranteed to be available.
# To check the backends, the test settings file will need to contain at least
# one cache backend setting that points at your cache server.
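# Illustrative example (not part of this file): a minimal test-settings entry that would
# enable the memcached test classes below, assuming a server on localhost:
#
#   CACHES = {
#       "default": {
#           "BACKEND": "django.core.cache.backends.memcached.PyMemcacheCache",
#           "LOCATION": "127.0.0.1:11211",
#       },
#   }
#
# The redis tests expect an analogous entry using
# "django.core.cache.backends.redis.RedisCache" with a "redis://..." LOCATION.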
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params["BACKEND"]] = _cache_params
PyLibMCCache_params = configured_caches.get(
"django.core.cache.backends.memcached.PyLibMCCache"
)
PyMemcacheCache_params = configured_caches.get(
"django.core.cache.backends.memcached.PyMemcacheCache"
)
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {"cull", "zero_cull"}
RedisCache_params = configured_caches.get("django.core.cache.backends.redis.RedisCache")
# The redis backend does not support cull-related options like `MAX_ENTRIES`.
redis_excluded_caches = {"cull", "zero_cull"}
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
def test_location_multiple_servers(self):
locations = [
["server1.tld", "server2:11211"],
"server1.tld;server2:11211",
"server1.tld,server2:11211",
]
for location in locations:
with self.subTest(location=location):
params = {"BACKEND": self.base_params["BACKEND"], "LOCATION": location}
with self.settings(CACHES={"default": params}):
self.assertEqual(cache._servers, ["server1.tld", "server2:11211"])
def _perform_invalid_key_test(self, key, expected_warning):
"""
While other backends merely warn, memcached should raise for an invalid
key.
"""
msg = expected_warning.replace(key, cache.make_key(key))
tests = [
("add", [key, 1]),
("get", [key]),
("set", [key, 1]),
("incr", [key]),
("decr", [key]),
("touch", [key]),
("delete", [key]),
("get_many", [[key, "b"]]),
("set_many", [{key: 1, "b": 2}]),
("delete_many", [[key, "b"]]),
]
for operation, args in tests:
with self.subTest(operation=operation):
with self.assertRaises(InvalidCacheKey) as cm:
getattr(cache, operation)(*args)
self.assertEqual(str(cm.exception), msg)
def test_invalid_with_version_key_length(self):
# make_key() adds a version to the key and exceeds the limit.
key = "a" * 248
expected_warning = (
"Cache key will cause errors if used with memcached: "
"%r (longer than %s)" % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(
CACHES=caches_setting_for_tests(
base=self.base_params, exclude=memcached_excluded_caches, TIMEOUT=None
)
):
cache.set("infinite_foo", "bar")
self.assertEqual(cache.get("infinite_foo"), "bar")
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(
CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000,
)
):
cache.set("future_foo", "bar")
self.assertEqual(cache.get("future_foo"), "bar")
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
max_value_length = 2**20
cache.set("small_value", "a")
self.assertEqual(cache.get("small_value"), "a")
large_value = "a" * (max_value_length + 1)
try:
cache.set("small_value", large_value)
except Exception:
# Most clients (e.g. pymemcache or pylibmc) raise when the value is
# too large. This test is primarily checking that the key was
# deleted, so the return/exception behavior for the set() itself is
# not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get("small_value")
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(
cache._class, "disconnect_all", autospec=True
) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
def test_set_many_returns_failing_keys(self):
def fail_set_multi(mapping, *args, **kwargs):
return mapping.keys()
with mock.patch.object(cache._class, "set_multi", side_effect=fail_set_multi):
failing_keys = cache.set_many({"key": "value"})
self.assertEqual(failing_keys, ["key"])
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(
CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
)
)
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
# libmemcached manages its own connections.
should_disconnect_on_close = False
@property
def incr_decr_type_error(self):
return cache._lib.ClientError
@override_settings(
CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
"binary": True,
"behaviors": {"tcp_nodelay": True},
},
)
)
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors["tcp_nodelay"], int(True))
def test_pylibmc_client_servers(self):
backend = self.base_params["BACKEND"]
tests = [
("unix:/run/memcached/socket", "/run/memcached/socket"),
("/run/memcached/socket", "/run/memcached/socket"),
("localhost", "localhost"),
("localhost:11211", "localhost:11211"),
("[::1]", "[::1]"),
("[::1]:11211", "[::1]:11211"),
("127.0.0.1", "127.0.0.1"),
("127.0.0.1:11211", "127.0.0.1:11211"),
]
for location, expected in tests:
settings = {"default": {"BACKEND": backend, "LOCATION": location}}
with self.subTest(location), self.settings(CACHES=settings):
self.assertEqual(cache.client_servers, [expected])
@unittest.skipUnless(PyMemcacheCache_params, "PyMemcacheCache backend not configured")
@override_settings(
CACHES=caches_setting_for_tests(
base=PyMemcacheCache_params,
exclude=memcached_excluded_caches,
)
)
class PyMemcacheCacheTests(BaseMemcachedTests, TestCase):
base_params = PyMemcacheCache_params
@property
def incr_decr_type_error(self):
return cache._lib.exceptions.MemcacheClientError
def test_pymemcache_highest_pickle_version(self):
self.assertEqual(
cache._cache.default_kwargs["serde"]._serialize_func.keywords[
"pickle_version"
],
pickle.HIGHEST_PROTOCOL,
)
for cache_key in settings.CACHES:
for client_key, client in caches[cache_key]._cache.clients.items():
with self.subTest(cache_key=cache_key, server=client_key):
self.assertEqual(
client.serde._serialize_func.keywords["pickle_version"],
pickle.HIGHEST_PROTOCOL,
)
@override_settings(
CACHES=caches_setting_for_tests(
base=PyMemcacheCache_params,
exclude=memcached_excluded_caches,
OPTIONS={"no_delay": True},
)
)
def test_pymemcache_options(self):
self.assertIs(cache._cache.default_kwargs["no_delay"], True)
@override_settings(
CACHES=caches_setting_for_tests(
BACKEND="django.core.cache.backends.filebased.FileBasedCache",
)
)
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super().setUp()
self.dirname = self.mkdtemp()
# Caches location cannot be modified through override_settings /
# modify_settings, hence settings are manipulated directly here and the
# setting_changed signal is triggered manually.
for cache_params in settings.CACHES.values():
cache_params["LOCATION"] = self.dirname
setting_changed.send(self.__class__, setting="CACHES", enter=False)
def tearDown(self):
super().tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def mkdtemp(self):
return tempfile.mkdtemp()
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, "not-a-cache-file")
with open(fname, "w"):
os.utime(fname, None)
cache.clear()
self.assertTrue(
os.path.exists(fname), "Expected cache.clear to ignore non cache files"
)
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(
os.path.exists(self.dirname), "Expected cache.clear to keep the cache dir"
)
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set("foo", "bar")
self.assertTrue(os.path.exists(self.dirname))
def test_get_ignores_enoent(self):
cache.set("foo", "bar")
os.unlink(cache._key_to_file("foo"))
# Returns the default instead of erroring.
self.assertEqual(cache.get("foo", "baz"), "baz")
@skipIf(
sys.platform == "win32",
"Windows only partially supports umasks and chmod.",
)
def test_cache_dir_permissions(self):
os.rmdir(self.dirname)
dir_path = Path(self.dirname) / "nested" / "filebasedcache"
for cache_params in settings.CACHES.values():
cache_params["LOCATION"] = dir_path
setting_changed.send(self.__class__, setting="CACHES", enter=False)
cache.set("foo", "bar")
self.assertIs(dir_path.exists(), True)
tests = [
dir_path,
dir_path.parent,
dir_path.parent.parent,
]
for directory in tests:
with self.subTest(directory=directory):
dir_mode = directory.stat().st_mode & 0o777
self.assertEqual(dir_mode, 0o700)
def test_get_does_not_ignore_non_filenotfound_exceptions(self):
with mock.patch("builtins.open", side_effect=OSError):
with self.assertRaises(OSError):
cache.get("foo")
def test_empty_cache_file_considered_expired(self):
cache_file = cache._key_to_file("foo")
with open(cache_file, "wb") as fh:
fh.write(b"")
with open(cache_file, "rb") as fh:
self.assertIs(cache._is_expired(fh), True)
@unittest.skipUnless(RedisCache_params, "Redis backend not configured")
@override_settings(
CACHES=caches_setting_for_tests(
base=RedisCache_params,
exclude=redis_excluded_caches,
)
)
class RedisCacheTests(BaseCacheTests, TestCase):
def setUp(self):
import redis
super().setUp()
self.lib = redis
@property
def incr_decr_type_error(self):
return self.lib.ResponseError
def test_cache_client_class(self):
self.assertIs(cache._class, RedisCacheClient)
self.assertIsInstance(cache._cache, RedisCacheClient)
def test_get_backend_timeout_method(self):
positive_timeout = 10
positive_backend_timeout = cache.get_backend_timeout(positive_timeout)
self.assertEqual(positive_backend_timeout, positive_timeout)
negative_timeout = -5
negative_backend_timeout = cache.get_backend_timeout(negative_timeout)
self.assertEqual(negative_backend_timeout, 0)
none_timeout = None
none_backend_timeout = cache.get_backend_timeout(none_timeout)
self.assertIsNone(none_backend_timeout)
def test_get_connection_pool_index(self):
pool_index = cache._cache._get_connection_pool_index(write=True)
self.assertEqual(pool_index, 0)
pool_index = cache._cache._get_connection_pool_index(write=False)
if len(cache._cache._servers) == 1:
self.assertEqual(pool_index, 0)
else:
self.assertGreater(pool_index, 0)
self.assertLess(pool_index, len(cache._cache._servers))
def test_get_connection_pool(self):
pool = cache._cache._get_connection_pool(write=True)
self.assertIsInstance(pool, self.lib.ConnectionPool)
pool = cache._cache._get_connection_pool(write=False)
self.assertIsInstance(pool, self.lib.ConnectionPool)
def test_get_client(self):
self.assertIsInstance(cache._cache.get_client(), self.lib.Redis)
def test_serializer_dumps(self):
self.assertEqual(cache._cache._serializer.dumps(123), 123)
self.assertIsInstance(cache._cache._serializer.dumps(True), bytes)
self.assertIsInstance(cache._cache._serializer.dumps("abc"), bytes)
class FileBasedCachePathLibTests(FileBasedCacheTests):
def mkdtemp(self):
tmp_dir = super().mkdtemp()
return Path(tmp_dir)
@override_settings(
CACHES={
"default": {
"BACKEND": "cache.liberal_backend.CacheClass",
},
}
)
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = "some key with spaces" * 15
val = "a value"
cache.set(key, val)
self.assertEqual(cache.get(key), val)
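# A minimal sketch of the kind of backend these settings point at (the real
# helper lives in cache/liberal_backend.py, which is not shown here), assuming
# it simply mixes a permissive validate_key() into a built-in backend:
#
#     from django.core.cache.backends.locmem import LocMemCache
#
#     class LiberalKeyValidationMixin:
#         def validate_key(self, key):
#             pass  # accept any key: no length or character checks
#
#     class CacheClass(LiberalKeyValidationMixin, LocMemCache):
#         pass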
@override_settings(
CACHES={
"default": {
"BACKEND": "cache.closeable_cache.CacheClass",
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
def test_close_only_initialized(self):
with self.settings(
CACHES={
"cache_1": {
"BACKEND": "cache.closeable_cache.CacheClass",
},
"cache_2": {
"BACKEND": "cache.closeable_cache.CacheClass",
},
}
):
self.assertEqual(caches.all(initialized_only=True), [])
signals.request_finished.send(self.__class__)
self.assertEqual(caches.all(initialized_only=True), [])
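# The "cache.closeable_cache.CacheClass" backend referenced above is not shown
# in this file; a minimal sketch consistent with these tests is a LocMemCache
# subclass whose close() hook records that it was called:
#
#     from django.core.cache.backends.locmem import LocMemCache
#
#     class CloseHookMixin:
#         closed = False
#
#         def close(self, **kwargs):
#             self.closed = True
#
#     class CacheClass(CloseHookMixin, LocMemCache):
#         pass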
DEFAULT_MEMORY_CACHES_SETTINGS = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "unique-snowflake",
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS["default"]["TIMEOUT"] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""
Settings having Cache arguments with a TIMEOUT=None create Caches that will
set non-expiring keys.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined in
django.core.cache.backends.base.BaseCache.__init__().
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings with have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="settingsprefix",
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
},
},
USE_I18N=False,
ALLOWED_HOSTS=[".example.com"],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
host = "www.example.com"
path = "/cache/test/"
factory = RequestFactory(HTTP_HOST=host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method="GET", query_string=None, update_cache=None):
request = self._get_request(
self.host, self.path, method, query_string=query_string
)
request._cache_update_cache = update_cache if update_cache else True
return request
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ("Accept-Encoding",), "Accept-Encoding"),
("Accept-Encoding", ("accept-encoding",), "Accept-Encoding"),
("Accept-Encoding", ("ACCEPT-ENCODING",), "Accept-Encoding"),
("Cookie", ("Accept-Encoding",), "Cookie, Accept-Encoding"),
(
"Cookie, Accept-Encoding",
("Accept-Encoding",),
"Cookie, Accept-Encoding",
),
(
"Cookie, Accept-Encoding",
("Accept-Encoding", "cookie"),
"Cookie, Accept-Encoding",
),
(None, ("Accept-Encoding", "COOKIE"), "Accept-Encoding, COOKIE"),
(
"Cookie, Accept-Encoding",
("Accept-Encoding", "cookie"),
"Cookie, Accept-Encoding",
),
(
"Cookie , Accept-Encoding",
("Accept-Encoding", "cookie"),
"Cookie, Accept-Encoding",
),
("*", ("Accept-Language", "Cookie"), "*"),
("Accept-Language, Cookie", ("*",), "*"),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
response = HttpResponse()
if initial_vary is not None:
response.headers["Vary"] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response.headers["Vary"], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
"views.decorators.cache.cache_page.settingsprefix.GET."
"18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e",
)
# A specified key_prefix is taken into account.
key_prefix = "localprefix"
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
"views.decorators.cache.cache_page.localprefix.GET."
"18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e",
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {"test": 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
"views.decorators.cache.cache_page.settingsprefix.GET."
"beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e",
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST="sub-1.example.com")
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST="sub-2.example.com")
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response.headers["Vary"] = "Pony"
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
"views.decorators.cache.cache_page.settingsprefix.GET."
"18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e",
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected
# Cache-Control parts.
(None, {"private": True}, {"private"}),
("", {"private": True}, {"private"}),
# no-cache.
("", {"no_cache": "Set-Cookie"}, {"no-cache=Set-Cookie"}),
("", {"no-cache": "Set-Cookie"}, {"no-cache=Set-Cookie"}),
("no-cache=Set-Cookie", {"no_cache": True}, {"no-cache"}),
("no-cache=Set-Cookie,no-cache=Link", {"no_cache": True}, {"no-cache"}),
(
"no-cache=Set-Cookie",
{"no_cache": "Link"},
{"no-cache=Set-Cookie", "no-cache=Link"},
),
(
"no-cache=Set-Cookie,no-cache=Link",
{"no_cache": "Custom"},
{"no-cache=Set-Cookie", "no-cache=Link", "no-cache=Custom"},
),
# Test whether private/public attributes are mutually exclusive
("private", {"private": True}, {"private"}),
("private", {"public": True}, {"public"}),
("public", {"public": True}, {"public"}),
("public", {"private": True}, {"private"}),
(
"must-revalidate,max-age=60,private",
{"public": True},
{"must-revalidate", "max-age=60", "public"},
),
(
"must-revalidate,max-age=60,public",
{"private": True},
{"must-revalidate", "max-age=60", "private"},
),
(
"must-revalidate,max-age=60",
{"public": True},
{"must-revalidate", "max-age=60", "public"},
),
)
cc_delim_re = re.compile(r"\s*,\s*")
for initial_cc, newheaders, expected_cc in tests:
with self.subTest(initial_cc=initial_cc, newheaders=newheaders):
response = HttpResponse()
if initial_cc is not None:
response.headers["Cache-Control"] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response.headers["Cache-Control"]))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"KEY_PREFIX": "cacheprefix",
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
},
},
)
class CacheHEADTest(SimpleTestCase):
path = "/cache/test/"
factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
return UpdateCacheMiddleware(lambda req: HttpResponse(msg))(request)
def test_head_caches_correctly(self):
test_content = "test content"
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = "test content"
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="settingsprefix",
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
},
},
LANGUAGES=[
("en", "English"),
("es", "Spanish"),
],
)
class CacheI18nTest(SimpleTestCase):
path = "/cache/test/"
factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(
lang,
key,
"Cache keys should include the language name when translation is active",
)
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META["HTTP_ACCEPT_LANGUAGE"] = accept_language
request.META["HTTP_ACCEPT_ENCODING"] = "gzip;q=1.0, identity; q=0.5, *;q=0"
response = HttpResponse()
response.headers["Vary"] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, "en")
request = self.factory.get(self.path)
request.META["HTTP_ACCEPT_ENCODING"] = "gzip;q=1.0, identity; q=0.5, *;q=0"
response = HttpResponse()
response.headers["Vary"] = "accept-encoding"
key = learn_cache_key(request, response)
self.assertIn(
lang,
key,
"Cache keys should include the language name when translation is active",
)
self.check_accept_language_vary(
"en-us", "cookie, accept-language, accept-encoding", key
)
self.check_accept_language_vary(
"en-US", "cookie, accept-encoding, accept-language", key
)
self.check_accept_language_vary(
"en-US,en;q=0.8", "accept-encoding, accept-language, cookie", key
)
self.check_accept_language_vary(
"en-US,en;q=0.8,ko;q=0.6", "accept-language, cookie, accept-encoding", key
)
self.check_accept_language_vary(
"ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ",
"accept-encoding, cookie, accept-language",
key,
)
self.check_accept_language_vary(
"ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4",
"accept-language, accept-encoding, cookie",
key,
)
self.check_accept_language_vary(
"ko;q=1.0,en;q=0.5", "cookie, accept-language, accept-encoding", key
)
self.check_accept_language_vary(
"ko, en", "cookie, accept-encoding, accept-language", key
)
self.check_accept_language_vary(
"ko-KR, en-US", "accept-encoding, accept-language, cookie", key
)
@override_settings(USE_I18N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(
tz,
key,
"Cache keys should include the time zone name when time zones are active",
)
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(
lang,
key,
"Cache keys shouldn't include the language name when i18n isn't active",
)
self.assertNotIn(
tz,
key,
"Cache keys shouldn't include the time zone name when i18n isn't active",
)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
def get_response(req):
return HttpResponse(msg)
translation.activate(lang)
return UpdateCacheMiddleware(get_response)(request)
# cache with non empty request.GET
request = self.factory.get(self.path, {"foo": "bar", "other": "true"})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
# first access, cache must return None
self.assertIsNone(get_cache_data)
content = "Check for cache with QUERY_STRING"
def get_response(req):
return HttpResponse(content)
UpdateCacheMiddleware(get_response)(request)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {"foo": "bar", "somethingelse": "true"})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, "en", en_message)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
# The cache can be recovered
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, "es", es_message)
# change again the language
translation.activate("en")
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate("es")
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertIsNone(get_cache_data)
def get_stream_response(req):
return StreamingHttpResponse(["Check for cache with streaming content."])
UpdateCacheMiddleware(get_stream_response)(request)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(
request
)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"KEY_PREFIX": "cacheprefix",
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse("Hello World %s" % value)
def csrf_view(request):
return HttpResponse(csrf(request)["csrf_token"])
@override_settings(
CACHE_MIDDLEWARE_ALIAS="other",
CACHE_MIDDLEWARE_KEY_PREFIX="middlewareprefix",
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
},
"other": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "other",
"TIMEOUT": "1",
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
factory = RequestFactory()
def setUp(self):
self.default_cache = caches["default"]
self.other_cache = caches["other"]
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super().tearDown()
def test_constructor(self):
"""
The constructor is correctly distinguishing between usage of
CacheMiddleware as Middleware vs. usage of CacheMiddleware as view
decorator and setting attributes appropriately.
"""
# If only one argument is passed in construction, it's being used as
# middleware.
middleware = CacheMiddleware(empty_response)
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, "middlewareprefix")
self.assertEqual(middleware.cache_alias, "other")
self.assertEqual(middleware.cache, self.other_cache)
# If more arguments are being passed in construction, it's being used
# as a decorator. First, test with "defaults":
as_view_decorator = CacheMiddleware(
empty_response, cache_alias=None, key_prefix=None
)
self.assertEqual(
as_view_decorator.cache_timeout, 30
) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, "")
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, "default")
self.assertEqual(as_view_decorator.cache, self.default_cache)
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(
hello_world_view, cache_timeout=60, cache_alias="other", key_prefix="foo"
)
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, "foo")
self.assertEqual(as_view_decorator_with_custom.cache_alias, "other")
self.assertEqual(as_view_decorator_with_custom.cache, self.other_cache)
def test_update_cache_middleware_constructor(self):
middleware = UpdateCacheMiddleware(empty_response)
self.assertEqual(middleware.cache_timeout, 30)
self.assertIsNone(middleware.page_timeout)
self.assertEqual(middleware.key_prefix, "middlewareprefix")
self.assertEqual(middleware.cache_alias, "other")
self.assertEqual(middleware.cache, self.other_cache)
def test_fetch_cache_middleware_constructor(self):
middleware = FetchFromCacheMiddleware(empty_response)
self.assertEqual(middleware.key_prefix, "middlewareprefix")
self.assertEqual(middleware.cache_alias, "other")
self.assertEqual(middleware.cache, self.other_cache)
def test_middleware(self):
middleware = CacheMiddleware(hello_world_view)
prefix_middleware = CacheMiddleware(hello_world_view, key_prefix="prefix1")
timeout_middleware = CacheMiddleware(hello_world_view, cache_timeout=1)
request = self.factory.get("/view/")
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, "1")
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b"Hello World 1")
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b"Hello World 1")
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix="prefix1")(hello_world_view)
explicit_default_view = cache_page(3, cache="default")(hello_world_view)
explicit_default_with_prefix_view = cache_page(
3, cache="default", key_prefix="prefix1"
)(hello_world_view)
other_view = cache_page(1, cache="other")(hello_world_view)
other_with_prefix_view = cache_page(1, cache="other", key_prefix="prefix2")(
hello_world_view
)
request = self.factory.get("/view/")
# Request the view once
response = default_view(request, "1")
self.assertEqual(response.content, b"Hello World 1")
# Request again -- hit the cache
response = default_view(request, "2")
self.assertEqual(response.content, b"Hello World 1")
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, "3")
self.assertEqual(response.content, b"Hello World 1")
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, "4")
self.assertEqual(response.content, b"Hello World 4")
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, "5")
self.assertEqual(response.content, b"Hello World 4")
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, "6")
self.assertEqual(response.content, b"Hello World 4")
# Requesting from an alternate cache won't hit cache
response = other_view(request, "7")
self.assertEqual(response.content, b"Hello World 7")
# But a repeated hit will hit cache
response = other_view(request, "8")
self.assertEqual(response.content, b"Hello World 7")
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, "9")
self.assertEqual(response.content, b"Hello World 9")
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches["default"]
response = default_view(request, "11")
self.assertEqual(response.content, b"Hello World 1")
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, "12")
self.assertEqual(response.content, b"Hello World 4")
# ... the explicit default cache will still hit
response = explicit_default_view(request, "13")
self.assertEqual(response.content, b"Hello World 1")
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, "14")
self.assertEqual(response.content, b"Hello World 4")
# .. but a rapidly expiring cache won't hit
response = other_view(request, "15")
self.assertEqual(response.content, b"Hello World 15")
# .. even if it has a prefix
response = other_with_prefix_view(request, "16")
self.assertEqual(response.content, b"Hello World 16")
def test_cache_page_timeout(self):
# Page timeout takes precedence over the "max-age" section of the
# "Cache-Control".
tests = [
(1, 3), # max_age < page_timeout.
(3, 1), # max_age > page_timeout.
]
for max_age, page_timeout in tests:
with self.subTest(max_age=max_age, page_timeout=page_timeout):
view = cache_page(timeout=page_timeout)(
cache_control(max_age=max_age)(hello_world_view)
)
request = self.factory.get("/view/")
response = view(request, "1")
self.assertEqual(response.content, b"Hello World 1")
time.sleep(1)
response = view(request, "2")
self.assertEqual(
response.content,
b"Hello World 1" if page_timeout > max_age else b"Hello World 2",
)
cache.clear()
def test_cached_control_private_not_cached(self):
"""Responses with 'Cache-Control: private' are not cached."""
view_with_private_cache = cache_page(3)(
cache_control(private=True)(hello_world_view)
)
request = self.factory.get("/view/")
response = view_with_private_cache(request, "1")
self.assertEqual(response.content, b"Hello World 1")
response = view_with_private_cache(request, "2")
self.assertEqual(response.content, b"Hello World 2")
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
request = self.factory.get("/view/")
csrf_middleware = CsrfViewMiddleware(csrf_view)
csrf_middleware.process_view(request, csrf_view, (), {})
cache_middleware = CacheMiddleware(csrf_middleware)
self.assertIsNone(cache_middleware.process_request(request))
cache_middleware(request)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get("/view/")
# The view shouldn't be cached on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn("Cache-Control", response)
self.assertIn("Expires", response)
def test_per_thread(self):
"""The cache instance is different for each thread."""
thread_caches = []
middleware = CacheMiddleware(empty_response)
def runner():
thread_caches.append(middleware.cache)
for _ in range(2):
thread = threading.Thread(target=runner)
thread.start()
thread.join()
self.assertIsNot(thread_caches[0], thread_caches[1])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="settingsprefix",
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
Tests various headers w/ TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway but the ETag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse)
"""
path = "/cache/test/"
factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ("Accept-Encoding",), "Accept-Encoding"),
("Accept-Encoding", ("accept-encoding",), "Accept-Encoding"),
("Accept-Encoding", ("ACCEPT-ENCODING",), "Accept-Encoding"),
("Cookie", ("Accept-Encoding",), "Cookie, Accept-Encoding"),
(
"Cookie, Accept-Encoding",
("Accept-Encoding",),
"Cookie, Accept-Encoding",
),
(
"Cookie, Accept-Encoding",
("Accept-Encoding", "cookie"),
"Cookie, Accept-Encoding",
),
(None, ("Accept-Encoding", "COOKIE"), "Accept-Encoding, COOKIE"),
(
"Cookie, Accept-Encoding",
("Accept-Encoding", "cookie"),
"Cookie, Accept-Encoding",
),
(
"Cookie , Accept-Encoding",
("Accept-Encoding", "cookie"),
"Cookie, Accept-Encoding",
),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
template = engines["django"].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response.headers["Vary"] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response.headers["Vary"], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines["django"].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = "localprefix"
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
"views.decorators.cache.cache_page.settingsprefix.GET."
"58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e",
)
# A specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
"views.decorators.cache.cache_page.localprefix.GET."
"58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e",
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {"test": 1})
template = engines["django"].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
"views.decorators.cache.cache_page.settingsprefix.GET."
"0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e",
)
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key("a.fragment")
self.assertEqual(
key, "template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e"
)
def test_with_one_vary_on(self):
key = make_template_fragment_key("foo", ["abc"])
self.assertEqual(key, "template.cache.foo.493e283d571a73056196f1a68efd0f66")
def test_with_many_vary_on(self):
key = make_template_fragment_key("bar", ["abc", "def"])
self.assertEqual(key, "template.cache.bar.17c1a507a0cb58384f4c639067a93520")
def test_proper_escaping(self):
key = make_template_fragment_key("spam", ["abc:def%"])
self.assertEqual(key, "template.cache.spam.06c8ae8e8c430b69fb0a6443504153dc")
def test_with_ints_vary_on(self):
key = make_template_fragment_key("foo", [1, 2, 3, 4, 5])
self.assertEqual(key, "template.cache.foo.7ae8fd2e0d25d651c683bdeebdb29461")
def test_with_unicode_vary_on(self):
key = make_template_fragment_key("foo", ["42º", "😀"])
self.assertEqual(key, "template.cache.foo.7ced1c94e543668590ba39b3c08b0237")
def test_long_vary_on(self):
key = make_template_fragment_key("foo", ["x" * 10000])
self.assertEqual(key, "template.cache.foo.3670b349b5124aa56bdb50678b02b23a")
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches["default"]
cache2 = caches["default"]
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches["default"])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
def test_nonexistent_alias(self):
msg = "The connection 'nonexistent' doesn't exist."
with self.assertRaisesMessage(InvalidCacheBackendError, msg):
caches["nonexistent"]
def test_nonexistent_backend(self):
test_caches = CacheHandler(
{
"invalid_backend": {
"BACKEND": "django.nonexistent.NonexistentBackend",
},
}
)
msg = (
"Could not find backend 'django.nonexistent.NonexistentBackend': "
"No module named 'django.nonexistent'"
)
with self.assertRaisesMessage(InvalidCacheBackendError, msg):
test_caches["invalid_backend"]
def test_all(self):
test_caches = CacheHandler(
{
"cache_1": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
},
"cache_2": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
},
}
)
self.assertEqual(test_caches.all(initialized_only=True), [])
cache_1 = test_caches["cache_1"]
self.assertEqual(test_caches.all(initialized_only=True), [cache_1])
self.assertEqual(len(test_caches.all()), 2)
# .all() initializes all caches.
self.assertEqual(len(test_caches.all(initialized_only=True)), 2)
self.assertEqual(test_caches.all(), test_caches.all(initialized_only=True))
|
web_utils.py
|
#!/usr/bin/env python
# -*-coding:utf-8-*-
import threading
import logging
import requests
import js2py
from my_fake_useragent import UserAgent
from datetime import datetime
from dateutil.relativedelta import relativedelta
from .cache_utils import cachedb, func_cache
from .datetime_helper import get_timestamp, get_dt_fromtimestamp
logger = logging.getLogger(__name__)
rank_url = 'https://lol.qq.com/act/lbp/common/guides/guideschampion_rank.js'
position_url = 'https://lol.qq.com/act/lbp/common/guides/guideschampion_position.js'
hero_url = 'https://game.gtimg.cn/images/lol/act/img/js/heroList/hero_list.js'
ua = UserAgent(family=['chrome', 'firefox'])
def use_cache_callback_requests_web(cache_data, func, args, kwargs, use_cache_oldest_dt=None):
timestamp = cache_data.get('timestamp', get_timestamp())
data_dt = get_dt_fromtimestamp(timestamp)
if use_cache_oldest_dt is None:
        target_dt = datetime.now() - relativedelta(days=14)  # default: treat cached data older than 14 days as stale
else:
target_dt = use_cache_oldest_dt
    if data_dt < target_dt:  # data is too old, so re-execute the request in a background thread
t = threading.Thread(target=update_requests_web, args=(cache_data, args))
t.daemon = True
t.start()
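# Note: func_cache comes from the local cache_utils module, which is not shown
# here. Judging from this callback's signature, it is invoked with the cached
# entry plus the original call's func/args/kwargs whenever a cached value is
# about to be served, so stale data can be refreshed in a background thread
# without blocking the caller.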
def update_requests_web(cache_data, args):
logger.info('update_requests_web')
headers = {
'user-agent': ua.random()
}
url = args[0]
data = requests.get(url, headers=headers, timeout=30)
cache_data['data'] = data
cache_data['timestamp'] = str(get_timestamp())
key = cache_data.get('key')
cachedb.set(key, cache_data)
return data
@func_cache(use_cache_callback=use_cache_callback_requests_web)
def _requests_web(url):
"""
有数据则直接使用 没有数据则试着从网络上请求
直接使用数据的时候会根据数据的时间戳来判断新旧,如果数据过旧则启动后台更新线程
:param url:
:return:
"""
headers = {
'user-agent': ua.random()
}
data = requests.get(url, headers=headers, timeout=30)
return data
@func_cache("position_data")
def download_position_data():
res = _requests_web(position_url)
position_data = js2py.eval_js(res.text).to_dict()
return position_data
@func_cache("rank_data")
def download_rank_data():
res = _requests_web(rank_url)
rank_data = js2py.eval_js(res.text).to_dict()
return rank_data
@func_cache("hero_data")
def download_hero_data():
"""
:return:
"""
res = _requests_web(hero_url)
hero_data = res.json()
return hero_data
def get_all_hero_name(data=None):
if not data:
data = download_hero_data()['hero']
res = []
for item in data:
res.append(item['name'])
return res
def mix_all_data_togather():
hero_data = download_hero_data()
    position_data = download_position_data()
rank_data = download_rank_data()
res = []
for item in hero_data['hero']:
heroId = item['heroId']
new_item = item.copy()
if 'selectAudio' in new_item:
del new_item['selectAudio']
if 'banAudio' in new_item:
del new_item['banAudio']
new_item['rank_data'] = rank_data['list'].get(str(heroId), {})
        new_item['position_data'] = position_data['list'].get(str(heroId), {})
res.append(new_item)
return res
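# Illustrative usage sketch (not part of the original module); assumes the
# cache_utils backend is initialised and the lol.qq.com endpoints are reachable.
if __name__ == '__main__':
    merged = mix_all_data_togather()
    print('merged data for %d heroes' % len(merged))
    print(get_all_hero_name()[:5])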
|
util.py
|
#!/usr/bin/python3
import os
import sys
import shutil
import threading
import traceback
from time import sleep
from datetime import time, date, datetime, timedelta
class ansi:
RESET = "\033[0m"
BLACK = "\033[30m"
RED = "\033[31m"
GREEN = "\033[32m"
YELLOW = "\033[33m"
BLUE = "\033[34m"
MAGENTA = "\033[35m"
CYAN = "\033[36m"
WHITE = "\033[37m"
BBLACK = "\033[30;1m"
BRED = "\033[31;1m"
BGREEN = "\033[32;1m"
BYELLOW = "\033[33;1m"
BBLUE = "\033[34;1m"
BMAGENTA = "\033[35;1m"
BCYAN = "\033[36;1m"
BWHITE = "\033[37;1m"
BG_BLACK = "\033[40m"
BG_RED = "\033[41m"
BG_GREEN = "\033[42m"
BG_YELLOW = "\033[43m"
BG_BLUE = "\033[44m"
BG_MAGENTA = "\033[45m"
BG_CYAN = "\033[46m"
BG_WHITE = "\033[47m"
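# Example (illustrative): wrap text in a colour code and always reset afterwards.
#
#     print(ansi.BRED + "error:" + ansi.RESET, "something went wrong")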
def abort(msg):
print(str(msg))
sys.exit(1)
def sensible_input(prompt):
try:
return input(prompt)
except KeyboardInterrupt:
print("^C")
exit(0)
def inner_text(el):
if el.nodeType == el.TEXT_NODE:
return el.data
s = ""
for e in el.childNodes:
s += inner_text(e)
return s
def clear():
cmd = "cls" if os.name == "nt" else "clear"
# for clearing scrollback
os.system(cmd)
os.system(cmd)
print("\033[3J\033c", end='')
def timestr_to_delta(timestr):
if isinstance(timestr, timedelta):
return timestr
if ":" not in timestr:
return timedelta(0, float(timestr))
time_parts = timestr.split(":")
return timedelta(0, (int(time_parts[0])*60 + int(time_parts[1]))*60 + int(time_parts[2] if len(time_parts) > 2 else 0))
def datestr_to_date(datestr):
if isinstance(datestr, date):
return datestr
date_parts = datestr.split("-")
    return date(int(date_parts[0]), int(date_parts[1]), int(date_parts[2]) if len(date_parts) > 2 else 1)
def timestr_to_time(timestr):
if isinstance(timestr, time):
return timestr
time_parts = timestr.split(":")
return time(int(time_parts[0]), int(time_parts[1]), int(time_parts[2] if len(time_parts) > 2 else 0))
def iso_to_datetime(iso):
if isinstance(iso, datetime):
return iso
if "T" in iso:
d, t = iso.split("T")
elif " " in iso:
d, t = iso.split(" ")
return datetime.combine(datestr_to_date(d), timestr_to_time(t))
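# Illustrative conversions for the helpers above (examples added for clarity):
#
#     timestr_to_delta("1:30")               -> timedelta(seconds=5400)   # 1 h 30 min
#     timestr_to_delta("45")                 -> timedelta(seconds=45)
#     datestr_to_date("2021-03-05")          -> date(2021, 3, 5)
#     timestr_to_time("08:15")               -> time(8, 15)
#     iso_to_datetime("2021-03-05T08:15:00") -> datetime(2021, 3, 5, 8, 15)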
class Progress:
def __init__(self, msg="Loading", rate=.2, max_length=3, char='.', overwrite=False, fill=True):
self.msg = msg
self.rate = rate
self.max_length = max_length
self.char = char
self.overwrite = overwrite
self.fill = fill
self.event = threading.Event()
self.thread = threading.Thread(target=self.worker)
def worker(self):
timer = 0
update_rate = 0.01
ellipsis = 1
while True:
if timer >= self.rate:
timer = 0
ellipsis = (ellipsis + 1) % (self.max_length + 1)
msg = "{}{}{}".format(self.msg, self.char*ellipsis, ' '*(self.max_length-ellipsis))
spacing = shutil.get_terminal_size((80, 24))[0] - len(msg)
print("{}{}\r".format(msg, spacing * " " if self.fill else ""), end='')
if self.event.is_set():
if not self.overwrite:
print("")
break
timer += update_rate
sleep(update_rate)
def stop(self):
self.event.set()
self.thread.join()
def __enter__(self):
self.thread.start()
def __exit__(self, type, value, traceback):
self.stop()
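# Illustrative usage (not part of the original module): the context manager
# animates "<msg>..." on a single line until the with-block exits.
#
#     with Progress("Downloading", rate=0.2):
#         sleep(2)  # long-running work goes here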
|
datasets.py
|
import glob
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import math
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8):
    # Make sure only the first process in DDP processes the dataset first, so the following ones can use the cache.
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
rank=rank)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
dataloader = InfiniteDataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn) # torch.utils.data.DataLoader()
return dataloader, dataset
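# Illustrative call (a sketch; `opt` and `hyp` stand for the usual YOLOv5 option
# namespace and augmentation-hyperparameter dict, which are defined elsewhere):
#
#     dataloader, dataset = create_dataloader('data/train', imgsz=640, batch_size=16,
#                                             stride=32, opt=opt, hyp=hyp, augment=True,
#                                             rect=False, rank=-1, world_size=1, workers=8)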
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers.
Uses same syntax as vanilla DataLoader.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
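# Note on the two classes above: _RepeatSampler yields batches forever, so the
# single iterator created in InfiniteDataLoader.__init__ is never exhausted and
# its worker processes are never torn down; each epoch, __iter__ simply pulls
# len(self) batches from that long-lived iterator, which is what "reuses
# workers" means here.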
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
def my_LoadImages(source):
path = '/home/qm/ros_learn/py3_yolo/src/ros_yolo/scripts/path'
img_size = 640
cap = None
img0 = source
img = letterbox(img0, new_shape=img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
print('img', img.shape)
    return path, img, img0, cap
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [x.replace(sa, sb, 1).replace(os.path.splitext(x)[-1], '.txt') for x in img_paths]
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted(
[x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
assert len(self.img_files) > 0, 'No images found'
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Read cache
cache.pop('hash') # remove hash
labels, shapes = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
n = len(shapes) # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Check labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = enumerate(self.label_files)
if rank in [-1, 0]:
pbar = tqdm(pbar)
for i, file in pbar:
l = self.labels[i] # label
if l is not None and l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
if rank in [-1, 0]:
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
assert not augment, '%s. Can not train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
im = Image.open(img)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
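# NOTE: cache_labels() returns a dict keyed by image path, where each value is
# [labels (n x 5 float32 array of [class, x, y, w, h]), image size from exif_size()],
# plus a 'hash' entry over the label and image file lists so a stale cache can be detected.
# A minimal sketch of the cached structure (paths and sizes are illustrative only):
#   cache = {'images/img1.jpg': [np.zeros((0, 5), dtype=np.float32), (640, 480)],
#            'hash': get_hash(self.label_files + self.img_files)}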
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
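# NOTE: collate_fn() stacks the images and concatenates all labels into one (total_labels x 6)
# tensor whose first column is the batch image index written above; columns 1:6 keep the
# [class, x, y, w, h] normalized values produced by __getitem__. A minimal sketch of the
# resulting shapes, assuming a DataLoader built with this collate_fn (illustrative only):
#   imgs, targets, paths, shapes = next(iter(loader))
#   imgs.shape    -> (batch_size, 3, H, W)
#   targets.shape -> (number of labels in the whole batch, 6)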
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
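# NOTE: the hue LUT above wraps with % 180 because OpenCV stores hue for 8-bit images in the
# range 0-179, while saturation and value are clipped to 0-255. Illustrative effect of a
# single random draw (gain values are hypothetical):
#   r = [1.1, 1.0, 1.0]  -> hue becomes (hue * 1.1) % 180, saturation and value unchanged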
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
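# NOTE: load_mosaic() builds a 2s x 2s gray canvas (value 114), places four images around a
# random center (xc, yc), shifts each image's labels from normalized xywh to pixel xyxy with
# padw/padh, then random_perspective() with the mosaic border trims the canvas back toward
# the training size. A minimal usage sketch (the dataset object and index are illustrative):
#   img4, labels4 = load_mosaic(dataset, index=0)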
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
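# NOTE: worked example of the letterbox arithmetic above with auto=True and new_shape=640:
# a 1280x720 (w x h) image gives r = min(640/720, 640/1280) = 0.5, new_unpad = (640, 360),
# dh = 280 -> 280 % 32 = 24 -> 12 px of gray (114) padding on top and bottom, so the call
# returns a 640x384 image with ratio (0.5, 0.5) and padding (0.0, 12.0).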
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
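# NOTE: with the defaults above, a transformed box is kept only if it is still at least 2 px
# wide and tall, keeps more than 10% of its pre-augmentation area, and has an aspect ratio
# under 20. Illustrative check with hypothetical numbers: a 100x50 box shrunk to 30x20 keeps
# 600 / 5000 = 12% of its area and has aspect ratio 1.5, so it is retained.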
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
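# NOTE: cutout() paints rectangles of random mid-intensity color over the image at the size
# fractions listed in `scales`, then uses bbox_ioa() to drop labels that are more than 60%
# covered by a mask larger than ~3% of the image. Minimal usage sketch, mirroring the
# commented-out call in __getitem__ above (img is modified in place):
#   labels = cutout(img, labels)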
def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except Exception:
print('WARNING: image failure %s' % f)
def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
local_timer_test.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import signal
import time
import unittest
import unittest.mock as mock
import torchelastic.timer as timer
from torchelastic.timer.api import TimerRequest
from torchelastic.timer.local_timer import MultiprocessingRequestQueue
class LocalTimerTest(unittest.TestCase):
def setUp(self):
self.mp_queue = mp.Queue()
self.max_interval = 0.01
self.server = timer.LocalTimerServer(self.mp_queue, self.max_interval)
self.server.start()
def tearDown(self):
self.server.stop()
def test_exception_propagation(self):
with self.assertRaises(Exception, msg="foobar"):
with timer.expires(after=1):
raise Exception("foobar")
def test_no_client(self):
# no timer client configured; exception expected
with self.assertRaises(RuntimeError):
with timer.expires(after=1):
pass
def test_client_interaction(self):
# no timer client configured but one passed in explicitly
# no exception expected
timer_client = timer.LocalTimerClient(self.mp_queue)
timer_client.acquire = mock.MagicMock(wraps=timer_client.acquire)
timer_client.release = mock.MagicMock(wraps=timer_client.release)
with timer.expires(after=1, scope="test", client=timer_client):
pass
timer_client.acquire.assert_called_once_with("test", mock.ANY)
timer_client.release.assert_called_once_with("test")
def test_happy_path(self):
timer.configure(timer.LocalTimerClient(self.mp_queue))
with timer.expires(after=0.5):
time.sleep(0.1)
def test_get_timer_recursive(self):
"""
If a function acquires a countdown timer with default scope,
then recursive calls to the function should re-acquire the
timer rather than creating a new one. That is, only the last
recursive call's timer will take effect.
"""
self.server.start()
timer.configure(timer.LocalTimerClient(self.mp_queue))
# func should not time out
def func(n):
if n > 0:
with timer.expires(after=0.1):
func(n - 1)
time.sleep(0.05)
func(4)
# func2 should time out
def func2(n):
if n > 0:
with timer.expires(after=0.1):
func2(n - 1)
time.sleep(0.2)
p = mp.Process(target=func2, args=(2,))
p.start()
p.join()
self.assertEqual(-signal.SIGKILL, p.exitcode)
@staticmethod
def _run(mp_queue, timeout, duration):
client = timer.LocalTimerClient(mp_queue)
timer.configure(client)
with timer.expires(after=timeout):
time.sleep(duration)
def test_timer(self):
timeout = 0.1
duration = 1
p = mp.Process(target=self._run, args=(self.mp_queue, timeout, duration))
p.start()
p.join()
self.assertEqual(-signal.SIGKILL, p.exitcode)
def _enqueue_on_interval(mp_queue, n, interval, sem):
"""
enqueues ``n`` timer requests into ``mp_queue`` one element per
interval seconds. Releases the given semaphore once before going to work.
"""
sem.release()
for i in range(0, n):
mp_queue.put(TimerRequest(i, "test_scope", 0))
time.sleep(interval)
class MultiprocessingRequestQueueTest(unittest.TestCase):
def test_get(self):
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
requests = request_queue.get(1, timeout=0.01)
self.assertEqual(0, len(requests))
request = TimerRequest(1, "test_scope", 0)
mp_queue.put(request)
requests = request_queue.get(2, timeout=0.01)
self.assertEqual(1, len(requests))
self.assertIn(request, requests)
def test_get_size(self):
"""
Creates a "producer" process that enqueues ``n`` elements
every ``interval`` seconds. Asserts that a ``get(n, timeout=n*interval+delta)``
yields all ``n`` elements.
"""
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
n = 10
interval = 0.1
sem = mp.Semaphore(0)
p = mp.Process(target=_enqueue_on_interval, args=(mp_queue, n, interval, sem))
p.start()
sem.acquire() # blocks until the process has started to run the function
timeout = interval * (n + 1)
start = time.time()
requests = request_queue.get(n, timeout=timeout)
self.assertLessEqual(time.time() - start, timeout + interval)
self.assertEqual(n, len(requests))
def test_get_less_than_size(self):
"""
Tests slow producer.
Creates a "producer" process that enqueues ``n`` elements
every ``interval`` seconds. Asserts that a ``get(n, timeout=(interval * n/2))``
yields at most ``n/2`` elements.
"""
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
n = 10
interval = 0.1
sem = mp.Semaphore(0)
p = mp.Process(target=_enqueue_on_interval, args=(mp_queue, n, interval, sem))
p.start()
sem.acquire() # blocks until the process has started to run the function
requests = request_queue.get(n, timeout=(interval * (n / 2)))
self.assertLessEqual(n / 2, len(requests))
class LocalTimerServerTest(unittest.TestCase):
def setUp(self):
self.mp_queue = mp.Queue()
self.max_interval = 0.01
self.server = timer.LocalTimerServer(self.mp_queue, self.max_interval)
def tearDown(self):
self.server.stop()
def test_watchdog_call_count(self):
"""
checks that the watchdog function ran wait/interval +- 1 times
"""
self.server._run_watchdog = mock.MagicMock(wraps=self.server._run_watchdog)
wait = 0.1
self.server.start()
time.sleep(wait)
self.server.stop()
watchdog_call_count = self.server._run_watchdog.call_count
self.assertGreaterEqual(watchdog_call_count, int(wait / self.max_interval) - 1)
self.assertLessEqual(watchdog_call_count, int(wait / self.max_interval) + 1)
def test_watchdog_empty_queue(self):
"""
checks that the watchdog can run on an empty queue
"""
self.server._run_watchdog()
def _expired_timer(self, pid, scope):
expired = time.time() - 60
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=expired)
def _valid_timer(self, pid, scope):
valid = time.time() + 60
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=valid)
def _release_timer(self, pid, scope):
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=-1)
@mock.patch("os.kill")
def test_expired_timers(self, mock_os_kill):
"""
tests that a single expired timer on a process should terminate
the process and clean up all pending timers that were owned by the process
"""
test_pid = -3
self.mp_queue.put(self._expired_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=test_pid, scope="test2"))
self.server._run_watchdog()
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_called_once_with(test_pid, signal.SIGKILL)
@mock.patch("os.kill")
def test_acquire_release(self, mock_os_kill):
"""
tests that:
1. a timer can be acquired then released (should not terminate process)
2. a timer can be vacuously released (e.g. no-op)
"""
test_pid = -3
self.mp_queue.put(self._valid_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._release_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._release_timer(pid=test_pid, scope="test2"))
self.server._run_watchdog()
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_not_called()
@mock.patch("os.kill")
def test_valid_timers(self, mock_os_kill):
"""
tests that valid timers are processed correctly and the process is left alone
"""
self.mp_queue.put(self._valid_timer(pid=-3, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=-3, scope="test2"))
self.mp_queue.put(self._valid_timer(pid=-2, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=-2, scope="test2"))
self.server._run_watchdog()
self.assertEqual(4, len(self.server._timers))
self.assertTrue((-3, "test1") in self.server._timers)
self.assertTrue((-3, "test2") in self.server._timers)
self.assertTrue((-2, "test1") in self.server._timers)
self.assertTrue((-2, "test2") in self.server._timers)
mock_os_kill.assert_not_called()
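# NOTE: the tests above exercise the documented pairing of a LocalTimerServer with a
# LocalTimerClient and timer.expires(); a minimal sketch of that pattern outside the test
# harness (the timeout value and do_work() are illustrative, not part of this file):
#   mp_queue = mp.Queue()
#   server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
#   server.start()
#   timer.configure(timer.LocalTimerClient(mp_queue))
#   with timer.expires(after=5):
#       do_work()  # the worker process is SIGKILLed if this exceeds 5 seconds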
|
server.py
|
from socket import socket
from threading import Thread
from multiprocessing import Process
from bitgov.protocol.utilities import process_incoming, process_outgoing, switch
def server_config(IPv, PROTOCOL, host, port):
print("\033[1;33mSetting up the server.. \033[0;0m", end="")
try:
with socket(IPv, PROTOCOL) as sock:
sock.bind((host, port))
sock.listen()
server = Process(target=server_accept, args=(sock,))
server.start()
print("\033[1;32mSuccess!\033[0;0m 👍")
print("\033[1;33mServer listening on port: \033[1;32m{}\033[0;0m\n".format(str(port)))
return server
except Exception as error:
print("\033[1;31mConfiguration Error!\033[0;0m ⛔ ({})\n".format(error))
def server_accept(sock):
while True:
connection, address = sock.accept()
Thread(target=server_connection, args=(connection, address)).start()
def server_connection(connection, address):
with connection:
print("\033[1;33mConnected with: \033[1;32m{}:{}\033[0;0m".format(address[0], address[1]))
request = process_incoming(connection)
print("\033[1;33mReceived:\033[1;32m {}\033[0;0m\n".format(request))
if request:
response = switch(request, address)
else:
response = None
connection.sendall(process_outgoing(response))
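# NOTE: a minimal usage sketch of the helpers above; the address family, host and port are
# illustrative, and process_incoming/process_outgoing/switch come from
# bitgov.protocol.utilities as imported at the top of this module:
#   from socket import AF_INET, SOCK_STREAM
#   server = server_config(AF_INET, SOCK_STREAM, "0.0.0.0", 8333)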
|
store_utils.py
|
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods to set testcases up for Swift tests.
"""
from __future__ import print_function
import threading
from oslo_utils import units
from six.moves import BaseHTTPServer
FIVE_KB = 5 * units.Ki
class RemoteSubjectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_HEAD(self):
"""
Respond to a subject HEAD request with fake metadata
"""
if 'subjects' in self.path:
self.send_response(200)
self.send_header('Content-Type', 'application/octet-stream')
self.send_header('Content-Length', FIVE_KB)
self.end_headers()
return
else:
self.send_error(404, 'File Not Found: %s' % self.path)
return
def do_GET(self):
"""
Respond to a subject GET request with fake subject content.
"""
if 'subjects' in self.path:
self.send_response(200)
self.send_header('Content-Type', 'application/octet-stream')
self.send_header('Content-Length', FIVE_KB)
self.end_headers()
subject_data = '*' * FIVE_KB
self.wfile.write(subject_data)
self.wfile.close()
return
else:
self.send_error(404, 'File Not Found: %s' % self.path)
return
def log_message(self, format, *args):
"""
Simple override to prevent writing log noise to stderr...
"""
pass
def setup_http(test):
server_class = BaseHTTPServer.HTTPServer
remote_server = server_class(('127.0.0.1', 0), RemoteSubjectHandler)
remote_ip, remote_port = remote_server.server_address
def serve_requests(httpd):
httpd.serve_forever()
threading.Thread(target=serve_requests, args=(remote_server,)).start()
test.http_server = remote_server
test.http_ip = remote_ip
test.http_port = remote_port
test.addCleanup(test.http_server.shutdown)
def get_http_uri(test, subject_id):
uri = ('http://%(http_ip)s:%(http_port)d/subjects/' %
{'http_ip': test.http_ip, 'http_port': test.http_port})
uri += subject_id
return uri
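# NOTE: get_http_uri() simply joins the fake server's address with the subject id, so for a
# server bound to 127.0.0.1 on an ephemeral port the result looks like (values illustrative):
#   http://127.0.0.1:54321/subjects/<subject_id>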
|
absinthe_server.py
|
import threading
import pyjsonrpc
import json
import os
import traceback  # used by start_command_server() when logging initialization failures
from geventwebsocket import WebSocketServer, WebSocketApplication, Resource
from absinthe.tools.commands import CommandRequestHandler, external_jsonrpc_command
from absinthe.message import Message
from absinthe.tools.utils import SimpleResponse
from absinthe.tools.remote_process_base import RemoteProcessBase
# gevent socket in thread, need to patch...
from gevent import monkey
monkey.patch_all()
class Client(WebSocketApplication):
def __init__(self, ws, manager):
WebSocketApplication.__init__(self, ws)
self.manager = manager
self.address = '%(REMOTE_ADDR)s:%(REMOTE_PORT)s' % ws.environ
def send(self, message):
self.ws.send(str(message))
def on_open(self):
pass
def on_message(self, message):
if message is None:
return
self.manager.on_message(message, self)
def on_close(self, reason):
self.manager.on_close(self)
class Session(object):
def __init__(self, name, client, path):
self.name = name
self.client = client
self.path = path
class PathHandler(object):
def __init__(self, name, path):
self.name = name
self.path = path
def parse(self, filename):
if filename.find(self.path) != 0:
raise Exception('Path mismatch')
fn = filename[len(self.path):]
return fn.split(os.sep)
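# NOTE: parse() strips the registered path prefix and splits the remainder on os.sep; a
# minimal sketch, assuming a POSIX separator and a registered path ending with '/':
#   PathHandler('proj', '/home/user/proj/').parse('/home/user/proj/src/main.py')
#   -> ['src', 'main.py']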
class ClientManager(object):
def __init__(self, session_manager):
self.session_manager = session_manager
def __call__(self, ws):
return Client(ws, self.session_manager)
class SessionManager(object):
def __init__(self, logger):
self.logger = logger
self.sessions = {}
self.paths = {}
def register_path(self, path):
self.paths[path.name] = path
def find_sessions(self, path_name):
sessions = []
for name in self.sessions:
for session in self.sessions[name]:
if session.path.name == path_name:
sessions.append(session)
return sessions
@external_jsonrpc_command
def set_focus(self, path_name):
sessions = self.find_sessions(path_name)
for session in sessions:
session.client.send(Message(session.name, 'set_focus'))
return SimpleResponse(True)
@external_jsonrpc_command
def open_file(self, path_name, filename, line):
self.logger.debug('Open file %s in %s' % (filename, path_name))
msgs = []
try:
sessions = self.find_sessions(path_name)
if len(sessions) == 0:
msg = 'There is no client for this path %s' % path_name
self.logger.warning(msg)
return SimpleResponse(False, [msg])
msgs.append('Session found: %s' % path_name)
for session in sessions:
file_parts = session.path.parse(filename)
session.client.send(Message(session.name, 'open_file', dict(filename = file_parts, line = line)))
msgs.append('File open request sent to %s' % session.client.address)
for msg in msgs:
self.logger.debug(msg)
except Exception as e:
self.logger.exception(e)
msgs.append(str(e))
return SimpleResponse(True, msgs)
def on_message(self, message, client):
try:
msg = Message.from_str(message)
except Exception as e:
self.logger.error('Malformed message received via websocket: %s, %s' % (e, message))
return
if hasattr(self, msg.command):
func = getattr(self, msg.command)
func(msg.name, client, **msg.arguments)
else:
self.logger.warning('Undefined command received: %s' % msg.command)
def on_close(self, client):
for name in self.sessions:
for session in self.sessions[name]:
if session.client == client:
self.sessions[name].remove(session)
self.logger.info('Session close: %s from %s' % (name, client.address))
def session_start(self, name, client, remote_path):
self.logger.info('Session start: %s from %s' % (name, client.address))
session = Session(name, client, self.paths[remote_path])
if name not in self.sessions:
self.sessions[name] = []
self.sessions[name].append(session)
class AbsintheServer(RemoteProcessBase):
def __init__(self, config, logger):
self.logger = logger
self.config = config
self.session_manager = SessionManager(self.logger)
self.client_manager = ClientManager(self.session_manager)
@external_jsonrpc_command
def init(self):
for name in self.config.data['paths']:
self.session_manager.register_path(PathHandler(name, self.config.data['paths'][name].value))
server_address = self.config.data['agent_server']
self.server = WebSocketServer((server_address['host'].value, server_address['port'].value), Resource({'/': self.client_manager}))
th = threading.Thread(target=self.server.serve_forever)
th.daemon = True
th.start()
self.logger.debug('init')
return SimpleResponse(True)
# Initialize the command server to receive IPC commands.
def start_command_server(self):
try:
command_server_address = self.config.data['command_server']
self.command_server = pyjsonrpc.ThreadingHttpServer(
server_address = (command_server_address['host'].value, command_server_address['port'].value),
RequestHandlerClass = CommandRequestHandler
)
except Exception as e:
self.logger.error('Exception occurred during the command server initialization: ' + str(e) + traceback.format_exc())
return
CommandRequestHandler.logger = self.logger
CommandRequestHandler.externals.extend([self, self.session_manager])
self.logger.debug('command server starting...')
self.command_server.serve_forever()
|
main.py
|
import configparser
import json
import os
import sys
import threading
import time
import telebot
import vk_api
from telebot import types
config = configparser.ConfigParser()
config.read("settings.ini")
vk_login = config["VK"]["login"]
vk_password = config["VK"]["password"]
telegram_token = config["Telegram"]["token"]
telegram_chat = config["Telegram"]["chat"]
time_check = int(config["Settings"]["time_check"])
retries_max = int(config["Settings"]["retries_max"])
retries_time = int(config["Settings"]["retries_time"])
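# NOTE: the keys read above imply a settings.ini of roughly this shape; every value below is
# a placeholder, not taken from the original project:
#   [VK]
#   login = user@example.com
#   password = secret
#   [Telegram]
#   token = 123456:ABCDEF
#   chat = @my_channel
#   [Settings]
#   time_check = 60
#   retries_max = 5
#   retries_time = 10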
module = sys.modules[__name__]
if os.path.isfile('latest.log'):
os.remove('latest.log')
def logger(log):
log = time.strftime(f'[%H:%M:%S] {log}')
print(log)
with open('latest.log', 'a', encoding='utf-8') as f:
f.write(f'{log}\n')
def captcha_handler(captcha):
key = input('Enter Captcha {0}: '.format(captcha.get_url())).strip()
return captcha.try_again(key)
def auth_handler():
key = input('Enter authentication code: ')
remember_device = True
return key, remember_device
def init_telegram():
module.bot = telebot.TeleBot(telegram_token)
logger('Successfully logged in in telegram!')
def init_vk():
vk_session = vk_api.VkApi(
login=vk_login,
password=vk_password,
auth_handler=auth_handler,
captcha_handler=captcha_handler,
)
module.vk = vk_session.get_api()
try:
vk_session.auth()
logger('Successfully logged in in VK!')
except vk_api.AuthError as e:
logger('VK: ' + str(e))
checker(int(time.time()))
def checker(start_time):
while True:
time.sleep(time_check)
newsfeed = module.vk.newsfeed.get(
count=100, start_time=start_time, max_photos=10
)
posts = (json.loads(json.dumps(newsfeed))).get('items')
if posts:
start_time = posts[0]['date'] + 1
logger('New posts were found!')
for post in posts[::-1]:
check_attachments(post)
def check_attachments(post):
if post.get('photos'):
return
if post.get('copy_history'):
post = post['copy_history'][0]
if not (post.get('attachments')):
logger('Post without attachments.')
else:
logger('From post...')
transfer_attachments_to_telegram(get_attachments(post))
def get_sizes(size_path):
photo_size = None
for photoType in size_path[0:]:
if photoType.get('type') == 'x':
photo_size = photoType.get('url')
if photoType.get('type') == 'y':
photo_size = photoType.get('url')
if photoType.get('type') == 'z':
photo_size = photoType.get('url')
if photoType.get('type') == 'w':
photo_size = photoType.get('url')
return photo_size
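# NOTE: get_sizes() overwrites photo_size on every matching entry, so the URL of the last
# 'x'/'y'/'z'/'w' item in the sizes list wins; assuming VK lists sizes from smallest to
# largest, this effectively picks the biggest available photo.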
def get_attachments(post):
attach_list = []
photo_group = []
for att in post['attachments'][0:]:
att_type = att.get('type')
attachment = att[att_type]
attachments = None
title = None
preview = None
if att_type == 'photo':
photo_size = get_sizes(attachment.get('sizes'))
photo_group.append(photo_size)
continue
elif att_type == 'video':
retries = 0
photos = {}
owner_id = str(attachment.get('owner_id'))
video_id = str(attachment.get('id'))
access_key = str(attachment.get('access_key'))
for key, value in attachment.items():
if key.startswith('photo_'):
photos[key] = value
preview = photos[max(photos, key=lambda k: int(k.rsplit('_', 1)[1]))]  # numerically largest preview, not the lexicographic max
title = attachment.get('title')
full_url = str(owner_id + '_' + video_id + '_' + access_key)
while retries_max > retries:
attachments = module.vk.video.get(videos=full_url)['items'][0].get('player')
if attachments is not None:
break
else:
retries += 1
logger(f'VK did not process the video. Retry {retries}/{retries_max}...')
time.sleep(retries_time)
continue
else:
logger(f'Unable to get video link after {retries_max} retries.')
elif att_type == 'doc':
title = attachment.get('title')
doc_type = attachment.get('type')
if doc_type != 3 and doc_type != 4 and doc_type != 5:
att_type = 'other'
attachments = attachment.get('url')
elif att_type == 'album':
preview = get_sizes(attachment['thumb'].get('sizes'))
title = attachment.get('title')
owner_id = str(attachment.get('owner_id'))
album_id = str(attachment.get('id'))
attachments = str(f'https://vk.com/album{owner_id}_{album_id}')
elif att_type == 'link' and attachment.get('description') == 'Статья':
preview = get_sizes(attachment['photo'].get('sizes'))
title = attachment.get('title')
attachments = str(attachment.get('url'))
if attachments is not None:
attach_list.append({'type': att_type, 'link': attachments, 'title': title, 'preview': preview})
else:
logger(f'Undefined type of attachment: {att_type}')
logger(attachment)
if photo_group:
attach_list.append({'type': 'photo', 'link': photo_group})
return attach_list
def transfer_attachments_to_telegram(attachments):
for attach_element in attachments[0:]:
retries = 0
att_type = attach_element.get('type')
link = attach_element.get('link')
title = attach_element.get('title')
preview = attach_element.get('preview')
while retries_max > retries:
try:
if att_type == 'photo':
media_photo = []
for photo_url in link[0:]:
media_photo.append(types.InputMediaPhoto(photo_url))
module.bot.send_media_group(telegram_chat, media_photo)
logger('Send photo group.')
elif att_type == 'video' or att_type == 'album' or att_type == 'link':
module.bot.send_media_group(
telegram_chat,
[types.InputMediaPhoto(preview, caption=f'{title}\n{link}')],
)
logger(f'Send {att_type} group.')
elif att_type == 'doc' or att_type == 'gif':
module.bot.send_document(telegram_chat, link)
logger('Send document group.')
elif att_type == 'other':
module.bot.send_message(telegram_chat, f'{title}\n{link}')
logger('Send other group.')
break
except Exception as e:
retries += 1
if 'Too Many Requests: retry after' in str(e):
wait = str(e).split()[-1]
logger(f'[{retries}/{retries_max}] Telegram API rate limit hit. Waiting {wait}s')
time.sleep(int(wait))
continue
elif 'Bad Request: group send failed' in str(e):
logger(f'[{retries}/{retries_max}] Telegram error: group send failed.')
time.sleep(retries_time)
continue
elif 'Read timed out.' in str(e):
logger(f'[{retries}/{retries_max}] Telegram error: read timed out.')
time.sleep(retries_time)
continue
elif 'Bad Request: failed to get HTTP URL content' in str(e):
logger(f'[{retries}/{retries_max}] Telegram could not fetch the URL content; it may be too large...')
att_type = 'other'
continue
else:
logger(f'[{retries}/{retries_max}] {e}')
logger(attachments)
continue
else:
logger(f'Unable to send attachment after {retries_max} retries.')
t1 = threading.Thread(target=init_vk)
t2 = threading.Thread(target=init_telegram)
t1.start()
t2.start()
t1.join()
t2.join()
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import string
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
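# NOTE: minimal sketch of how can_frame_fmt maps onto struct packing; the CAN id and payload
# below are illustrative only:
#   can_id, data = 0x123, b'\x11\x22\x33\x44\x55\x66\x77\x88'
#   frame = struct.pack(can_frame_fmt, can_id, len(data), data)
#   assert len(frame) == can_frame_size  # 16 bytes: 4 id + 1 dlc + 3 pad + 8 data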
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn('.', ip, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
self.assertRaises(OSError, socket.gethostbyname, addr)
self.assertRaises(OSError, socket.gethostbyaddr, addr)
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
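# For instance, socket.ntohs(socket.htons(0x1234)) == 0x1234 and
# socket.ntohl(socket.htonl(0x01234567)) == 0x01234567 regardless of the
# host's byte order; only the low 16/32 bits are meaningful.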
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i & mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
good_values = [1, 2, 3, 1, 2, 3]
bad_values = [-1, -2, -3, -1, -2, -3]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on these platforms, as it has a
# non-standard port/protocol entry that breaks this test's assumptions
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A freshly created socket should have SO_REUSEADDR disabled (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertEqual(reuse, 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertNotEqual(reuse, 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# every tuple in the returned list is supposed to have length 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd; on this path it doesn't actually verify the family and
# type, and simply populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, _ = tempfile.mkstemp()
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
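# For reference, this mirrors Linux's struct can_frame: a 32-bit CAN
# identifier, a one-byte data length code, padding, and up to 8 data bytes.
# The exact struct format string (can_frame_fmt) is assumed to be defined
# on the base test class.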
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification through poll
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
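# As a purely illustrative sketch (the class names used here are
# hypothetical; the real concrete combinations are defined further down in
# this file), a UDP recvmsg() test class would be assembled roughly like:
#
#     class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#         pass
#
# with SendrecvmsgUDPTestBase in turn mixing SendrecvmsgConnectionlessBase,
# SendrecvmsgDgramFlagsBase and a UDP-specific SocketTestBase subclass.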
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
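# For example, on the connectionless subclasses (where
# sendmsg_to_server_defaults is ([], [], 0, serv_addr)), calling
# sendmsgToServer([MSG]) is equivalent to
# cli_sock.sendmsg([MSG], [], 0, serv_addr).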
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
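# Example: on the datagram subclasses (where MSG_TRUNC is the non-EOR
# indicator), checkFlags(flags, eor=False) requires MSG_TRUNC to be set
# and MSG_CTRUNC/MSG_OOB to be clear, while
# checkFlags(flags, eor=True, ignore=socket.MSG_TRUNC) leaves the
# truncation bit out of the comparison entirely.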
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
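# Roughly, the client half of the test below keeps calling
# sendmsgToServer([b"a" * 512], [], socket.MSG_DONTWAIT) until the send
# buffer fills, at which point the call is expected to fail immediately
# with EAGAIN/EWOULDBLOCK instead of blocking.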
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
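
# Illustrative sketch (hypothetical helper, never invoked by the test
# runner) of the 4-tuple recvmsg() returns -- (data, ancillary data list,
# flags, sender address) -- which every assertion in RecvmsgTests above
# unpacks.  Assumes recvmsg() is available (POSIX).
def _sketch_recvmsg_return_value():
    import socket
    a, b = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        a.send(b"payload")
        msg, ancdata, flags, addr = b.recvmsg(1024)
        # No ancillary data was sent, so ancdata is an empty list.
        assert msg == b"payload" and ancdata == []
    finally:
        a.close()
        b.close()
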
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
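
# Illustrative sketch (hypothetical helper, never invoked by the test
# runner) of the scatter read exercised by testRecvmsgIntoScatter above:
# recvmsg_into() fills the supplied writable buffers in order.  Assumes
# recvmsg_into() is available (POSIX).
def _sketch_recvmsg_into_scatter():
    import socket
    a, b = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        a.send(b"Mary had a little lamb")
        b1, b2 = bytearray(4), bytearray(18)
        nbytes, ancdata, flags, addr = b.recvmsg_into([b1, b2])
        # The first four bytes land in b1, the remaining eighteen in b2.
        assert nbytes == 22 and b1 == bytearray(b"Mary")
    finally:
        a.close()
        b.close()
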
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
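
# Illustrative sketch (hypothetical helper, never invoked by the test
# runner) of the size relationship CmsgMacroTests verifies above:
# CMSG_LEN(n) is exactly CMSG_LEN(0) + n, while CMSG_SPACE(n) adds
# trailing padding, so CMSG_SPACE(n) >= CMSG_LEN(n).  The concrete
# numbers are platform-dependent, and both macros may be missing on
# platforms such as Windows.
def _sketch_cmsg_sizes(n=4):
    import socket
    print("CMSG_LEN(%d)   = %d" % (n, socket.CMSG_LEN(n)))
    print("CMSG_SPACE(%d) = %d" % (n, socket.CMSG_SPACE(n)))
    assert socket.CMSG_LEN(n) == socket.CMSG_LEN(0) + n
    assert socket.CMSG_SPACE(n) >= socket.CMSG_LEN(n)
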
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
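
# Illustrative sketch (hypothetical helper, never invoked by the test
# runner) of SCM_RIGHTS file-descriptor passing, the feature SCMRightsTest
# exercises above: a descriptor sent as ancillary data over an AF_UNIX
# socket is duplicated into the receiving end.  POSIX-only.
def _sketch_scm_rights_pass():
    import array, os, socket
    a, b = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    r, w = os.pipe()
    try:
        # Send the pipe's read end alongside one byte of ordinary data.
        a.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                            array.array("i", [r]))])
        size = socket.CMSG_SPACE(array.array("i").itemsize)
        msg, ancdata, flags, addr = b.recvmsg(1, size)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        fds = array.array("i")
        fds.frombytes(cmsg_data)
        # The received descriptor refers to the same underlying pipe.
        os.write(w, b"hi")
        assert os.read(fds[0], 2) == b"hi"
        os.close(fds[0])
    finally:
        os.close(r)
        os.close(w)
        a.close()
        b.close()
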
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
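
# Illustrative sketch (hypothetical helper, never invoked by the test
# runner) of receiving the hop limit as RFC 3542 ancillary data, the
# mechanism RFC3542AncillaryTest exercises above.  Assumes an IPv6-capable
# host where IPV6_RECVHOPLIMIT and IPV6_HOPLIMIT are defined.
def _sketch_recv_hoplimit():
    import array, socket
    recv_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    send_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    try:
        recv_sock.bind(("::1", 0))
        # Ask the kernel to attach the hop limit to incoming datagrams.
        recv_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
        send_sock.sendto(b"ping", recv_sock.getsockname())
        ancspace = socket.CMSG_SPACE(array.array("i").itemsize)
        msg, ancdata, flags, addr = recv_sock.recvmsg(1024, ancspace)
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == socket.IPPROTO_IPV6 and
                    cmsg_type == socket.IPV6_HOPLIMIT):
                hop = array.array("i")
                hop.frombytes(cmsg_data)
                print("hop limit:", hop[0])
    finally:
        recv_sock.close()
        send_sock.close()
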
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises the ZeroDivisionError
        # set up in the SIGALRM handler when interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises the
        # ZeroDivisionError set up in the SIGALRM handler when
        # interrupted by a signal.
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here, since Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires the address to be ignored anyway because the
        # socket is connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
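
# Illustrative sketch (hypothetical helper, never invoked by the test
# runner) of the interruption pattern the two classes above rely on: an
# ITIMER_REAL alarm delivers SIGALRM while recv() is blocked, and the
# deliberately failing handler's exception propagates out of the blocked
# call.  Assumes a POSIX platform with signal.setitimer().
def _sketch_interrupted_recv():
    import signal, socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("127.0.0.1", 0))
    sock.settimeout(5.0)
    old_handler = signal.signal(signal.SIGALRM, lambda signum, frame: 1 / 0)
    try:
        signal.setitimer(signal.ITIMER_REAL, 0.05)
        try:
            sock.recv(1024)        # blocks until SIGALRM fires
        except ZeroDivisionError:
            print("recv() was interrupted by the signal handler")
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)
        sock.close()
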
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
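
# Illustrative sketch (hypothetical helper, never invoked by the test
# runner) of socket.socketpair(), whose defaults BasicSocketPairTest
# checks above: the two returned sockets are already connected,
# bidirectional stream endpoints (AF_UNIX where available, AF_INET
# otherwise).
def _sketch_socketpair_roundtrip():
    import socket
    left, right = socket.socketpair()
    try:
        left.sendall(b"ping")
        assert right.recv(1024) == b"ping"
        right.sendall(b"pong")
        assert left.recv(1024) == b"pong"
    finally:
        left.close()
        right.close()
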
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
        # Test that setblocking() toggles between blocking and non-blocking mode
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
        self.assertTrue((end - start) < 1.0, "Error creating socket in non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
self.assertIsNone(conn.gettimeout())
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
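
# Illustrative sketch (hypothetical helper, never invoked by the test
# runner) of the non-blocking accept/select pattern NonBlockingTCPTests
# exercises above: accept() on a non-blocking listener fails immediately
# when no client is queued, and select() reports readiness once a
# connection arrives.
def _sketch_nonblocking_accept(timeout=1.0):
    import select, socket
    serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serv.bind(("127.0.0.1", 0))
    serv.listen()
    serv.setblocking(False)
    try:
        try:
            serv.accept()              # no pending connection yet
        except BlockingIOError:
            pass
        cli = socket.create_connection(serv.getsockname())
        readable, _, _ = select.select([serv], [], [], timeout)
        if serv in readable:
            conn, addr = serv.accept() # now succeeds without blocking
            conn.close()
        cli.close()
    finally:
        serv.close()
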
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
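
# Illustrative sketch (hypothetical helper, never invoked by the test
# runner) of socket.makefile(), the API FileObjectClassTestCase exercises
# above: the returned file object wraps the connection, so ordinary
# read()/write() calls translate into socket I/O, and the underlying
# connection stays open until both the socket object and every file
# obtained from makefile() have been closed.
def _sketch_makefile_roundtrip():
    import socket
    a, b = socket.socketpair()
    try:
        writer = a.makefile("wb")
        reader = b.makefile("rb")
        writer.write(b"hello over a file object\n")
        writer.flush()
        assert reader.readline() == b"hello over a file object\n"
        writer.close()
        reader.close()
    finally:
        a.close()
        b.close()
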
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
    # NOTE: to make `read_file` non-blocking we must call
    # `cli_conn.setblocking(False)`, and likewise `serv_conn.setblocking(False)`
    # for `write_file` (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
        # Avoid closing the socket before the server test has finished;
        # otherwise the system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking socket
# since the internal python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally Windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the Python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10MB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(self.TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non-regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non-blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=0.01) as sock, \
file as file:
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(thread, 'Threading required for this test.')
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
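# Illustrative note (not part of the original tests): the tests below all follow
# the same pattern with the helper above, roughly
#   with self.create_alg('hash', 'sha256') as algo:
#       op, _ = algo.accept()   # `op` is the operation socket used for send/recv
# and any setsockopt(SOL_ALG, ...) configuration (keys, auth sizes) is applied
# to `algo` before accept().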
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer, the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain, socket.MSG_MORE)
op.sendall(b'\x00' * taglen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain + b'\x00' * taglen
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain + b'\x00' * taglen
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg))
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg))
self.assertEqual(plain, res[assoclen:-taglen])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
async_producer.py
|
# async_producer.py
#
# Implement same functionality as producer.py but without threads
import time
from collections import deque
import heapq
class Scheduler:
def __init__(self):
self.ready = deque() # Functions ready to execute
self.sleeping = [] # Sleeping functions
self.sequence = 0 # Sequence number breaks ties when deadlines are identical
def call_soon(self, func):
self.ready.append(func)
def call_later(self, delay, func):
self.sequence += 1
deadline = time.time() + delay # Expiration time
# priority queue
heapq.heappush(self.sleeping, (deadline, self.sequence, func))
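# heapq compares the tuples element-wise, so when two deadlines tie the
# comparison falls through to self.sequence; without it Python would try to
# order the function objects themselves and raise a TypeError.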
def run(self):
while self.ready or self.sleeping:
if not self.ready:
deadline, _, func = heapq.heappop(self.sleeping)
# deadline, func = self.sleeping.pop(0)
delta = deadline - time.time()
if delta > 0:
time.sleep(delta)
self.ready.append(func)
while self.ready:
func = self.ready.popleft()
func()
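# Minimal usage sketch of the scheduler on its own (illustrative comments only,
# not executed; `countdown` and `countup` are hypothetical helpers using the
# module-level `sched` created below):
#   def countdown(n):
#       if n > 0:
#           print('Down', n)
#           sched.call_later(1, lambda: countdown(n - 1))
#   def countup(stop, n=0):
#       if n < stop:
#           print('Up', n)
#           sched.call_later(0.5, lambda: countup(stop, n + 1))
#   sched.call_soon(lambda: countdown(3))
#   sched.call_soon(lambda: countup(3))
#   sched.run()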
sched = Scheduler()
# -------------------------------------------------------
class Result:
def __init__(self, value=None, exc=None):
self.value = value
self.exc = exc
def result(self):
if self.exc:
raise self.exc
else:
return self.value
class QueueClosed(Exception):
pass
# Implement a queuing object
class AsyncQueue:
def __init__(self):
self.items = deque()
self.waiting = deque() # All getters waiting for data
self._closed = False # Can queue be used anymore?
def close(self):
self._closed = True
if self.waiting and not self.items:
for func in self.waiting:
sched.call_soon(func)
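# Waking the waiters simply re-runs their pending get(); with _closed now True
# and no items left, each callback receives a Result carrying QueueClosed.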
# We put something on the queue; if a getter is waiting, pop it off and pass it to the Scheduler
def put(self, item):
if self._closed:
raise QueueClosed()
self.items.append(item)
if self.waiting:
func = self.waiting.popleft()
# Do we call it right away?
# func() -----> not a good idea: calling it directly can nest deeply (recursion, unbounded stack growth)
sched.call_soon(func)
def get(self, callback):
# Wait until an item is available. Then return it.
if self.items:
callback(
Result(value=self.items.popleft())) # Run the callback right away when data is available; this still happens after close() so queued items drain first
else:
# No items available (must wait)
if self._closed:
callback(Result(exc=QueueClosed())) # Error result
else:
self.waiting.append(lambda: self.get(callback)) # no data arrange to execute later
def producer(q, count):
# Can't use a plain for loop here: time.sleep() would block until the loop completes, which defeats the async design
# for n in range(count):
# print('Producing', n)
# q.put(n)
# time.sleep(1)
def _run(n):
if n < count:
print('Producing', n)
q.put(n)
sched.call_later(1, lambda: _run(n + 1))
else:
print("Producer done")
q.close() # No more items will be produced
# q.put(None) # 'sentinel' to shut down
_run(0)
def consumer(q):
# def _consume(item): # This is the callback
def _consume(result):
try:
item = result.result()
# if item is None: # <<<<<<< Queue closed check (Error)
# print('Consumer done')
# else:
print('Consuming', item) # <<<<<<<< Queue item (Result)
sched.call_soon(lambda: consumer(q))
except QueueClosed:
print('Consumer done')
q.get(callback=_consume)
q = AsyncQueue()
sched.call_soon(lambda: producer(q, 10))
sched.call_soon(lambda: consumer(q))
sched.run()
# while True:
# item = q.get() # PROBLEM HERE: .get() waiting
# if item is None:
# break
# print('Consuming', item)
# print('Consumer done')
#
#
# q = queue.Queue() # Thread safe queue
# threading.Thread(target=producer, args=(q, 10)).start()
# threading.Thread(target=consumer, args=(q,)).start()
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import unittest
import mxnet as mx
import numpy as np
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from numpy.testing import assert_allclose
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed
from test_operator import *
from test_optimizer import *
from test_random import *
from test_gluon import *
from test_loss import *
from test_exc_handling import *
#from test_rnn import *
from test_gluon_rnn import *
from test_sparse_ndarray import test_create_csr, test_create_row_sparse, test_sparse_nd_slice
from test_sparse_ndarray import test_create_sparse_nd_empty, test_create_sparse_nd_from_sparse
from test_sparse_ndarray import test_create_sparse_nd_from_dense, test_create_sparse_nd_infer_shape
from test_sparse_ndarray import test_sparse_nd_check_format, test_sparse_nd_copy
from test_sparse_ndarray import test_sparse_nd_setitem, test_sparse_nd_binary_scalar_op
from test_sparse_operator import *
from test_ndarray import *
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm
del test_support_vector_machine_l2_svm
def check_countsketch(in_dim,out_dim,n):
sym = mx.sym.contrib.count_sketch(name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
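# Reference semantics exercised below: count sketch maps each input row x[n, :]
# to an out_dim-length row via a[n, h[0, i]] += s[0, i] * x[n, i], and the
# backward pass routes gradients through the same hashed index and sign:
# grad_x[n, i] = grad_out[n, h[0, i]] * s[0, i].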
# forward
exe_list = [sym.bind(mx.gpu(0), arr, arr_grad)]
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
assert_almost_equal(a,out1[0],rtol=1e-3, atol=1e-12)
# backward
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
for exe in exe_list:
exe.backward([out_grad])
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
assert_almost_equal(a,arr_grad[0].asnumpy(),rtol=1e-3, atol=1e-12)
@with_seed(0)
def test_countsketch():
nrepeat = 2
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
for repeat in range(nrepeat):
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1,maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
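# Layout note: the ifft operator consumes real/imag values interleaved along the
# last axis (column 2*i holds the real part, column 2*i+1 the imaginary part),
# which is why the input shape is doubled relative to shape_old and why the
# checks below repack `init` into a complex array before calling np.fft.ifft.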
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-12)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-12)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)
@with_seed(0)
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
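# Layout note: the fft operator emits real/imag values interleaved along the
# last axis, so the checks below reorder numpy's complex output into that packed
# layout (real parts at even columns, imaginary parts at odd columns) before comparing.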
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
#forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0],rtol=1e-3, atol=1e-6)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[1],rtol=1e-3, atol=1e-8)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-6)
@with_seed(0)
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
@with_seed()
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
# V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list))
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
@with_seed()
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 5, 7, 7), 'type_dict': {'deconv_data': np.float64}},
#             {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 5, 7, 7), 'type_dict': {'deconv_data': np.float64}},
#             {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 5, 7, 7), 'type_dict': {'deconv_data': np.float64}},
#             {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 5, 7, 7), 'type_dict': {'deconv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Deconvolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='deconv')
# sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='deconv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Deconvolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='deconv')
# sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='deconv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7645")
@with_seed(1234)
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear")
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
# Checking max pooling consistency across data sets of different float types is problematic,
# as the max value in a float32 data set may not remain the max value once cast to float16.
# Note: this redefinition reuses the name test_pooling_with_type, so it shadows the earlier
# test_pooling_with_type defined above; only this later version is collected and run.
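# Illustrative example: two distinct float32 values can collapse to the same float16 value,
# so the position of the maximum (and the gradient routed through it in backward) can differ
# between dtypes:
#   >>> import numpy as np
#   >>> np.float16(1.0001) == np.float16(1.0002)
#   True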
@with_seed(1234)
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
# this is unstable
# sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
# check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@with_seed()
def test_pooling_versions():
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False):
ctx_list = []
sym_list = []
# PoolingV1 cpu
if 'pool_v1_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# PoolingV1 gpu
if 'pool_v1_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# Pooling cpu
if 'pool_cpu' in pool_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, name='pool'))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
# Pooling gpu
if 'pool_gpu' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, cudnn_off=True, name='pool'))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=True,
name='pool'))
# CuDNNPooling
if 'pool_cudnn' in pool_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
if not global_pool:
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, cudnn_off=False, name='pool'))
else:
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=False,
name='pool'))
check_consistency(sym_list, ctx_list)
def test_1d_pooling(pool_type):
data = (2, 3, 20)
kernel = (4,)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (0,)
stride = (1,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
pad = (2,)
stride = (2,)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True)
def test_2d_pooling(pool_type):
data = (2, 3, 20, 20)
kernel = (4, 5)
pad = (0, 0)
stride = (1, 1)
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (0, 0)
stride = (1, 1)
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
# pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
pad = (2, 3)
stride = (2, 3)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True)
def test_3d_pooling(pool_type):
data = (2, 3, 20, 20, 20)
kernel = (4, 5, 3)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='valid', global_pool=False)
pad = (0, 0, 0)
stride = (1, 1, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
pad = (2, 3, 3)
stride = (2, 3, 1)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention='full', global_pool=False)
test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
global_pool=True)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_3d_pooling('max')
test_3d_pooling('avg')
test_3d_pooling('sum')
@with_seed()
def test_global_pooling():
def test_1d_pooling(pool_type):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
sym = mx.sym.Activation(name='act', act_type='sigmoid')
ctx_list = [{'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/8288")
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
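# Helper for the fused-vs-stacked RNN tests below: cell1 and cell2 are expected to be
# equivalent implementations (e.g. a cuDNN FusedRNNCell and its stacked, per-layer
# counterpart). Parameters are initialized on cell1's module, translated through
# cell1.unpack_weights() / cell2.pack_weights(), loaded into cell2's module, and the outputs
# of one forward pass on the same random batch are compared with assert_allclose.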
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=1e-2, atol=1e-4)
@with_seed()
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_lstm():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='lstm', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(100, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
assert_allclose(args[bias_name].asnumpy(), expected_bias)
@with_seed()
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed(1234)
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed(1234)
def test_deformable_psroipooling_with_type():
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed(1234)
def test_deformable_convolution_with_type():
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
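# Offset shape note: the offset input carries 2 * num_deformable_group * kernel_h * kernel_w
# channels (2 * 1 * 3 * 3 = 18 with the default num_deformable_group=1) and has the spatial
# size of the convolution output (a 3x3 kernel on a 10x10 input with no padding gives 8x8).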
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 10, 10),
# 'deformable_conv_offset': (2, 18, 8, 8),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_conv_offset': np.float16}},
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'}, tol=tol)
@with_seed()
def test_deformable_convolution_options():
# 2D convolution
# Pad > 0
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_offset': (2, 18, 7, 7),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list)
# Stride > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 18, 3, 3),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list)
# Dilate > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 18, 3, 3),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list)
# Deformable group > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 36, 5, 5),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2,
name='deformable_conv')
check_consistency(sym, ctx_list)
@with_seed()
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
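# The 61200-element parameter vector below is the flattened cuDNN LSTM weight count:
# per layer, 4 gates each with a 50x50 input-to-hidden matrix, a 50x50 hidden-to-hidden
# matrix and two 50-element biases, i.e. 4 * (2500 + 2500 + 50 + 50) = 20400; three layers
# give 61200.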
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
def test_rnn_layer():
check_rnn_layer(gluon.rnn.RNN(100, num_layers=3))
check_rnn_layer(gluon.rnn.RNN(100, activation='tanh', num_layers=3))
check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3))
check_rnn_layer(gluon.rnn.GRU(100, num_layers=3))
check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
check_rnn_layer_w_rand_inputs(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@unittest.skip("Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8211")
@with_seed()
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_gluon_ctc_consistency():
loss = mx.gluon.loss.CTCLoss()
data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)
cpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.cpu(0))
gpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.gpu(0))
cpu_data = data.copy().as_in_context(mx.cpu(0))
cpu_data.attach_grad()
with mx.autograd.record():
l_cpu = loss(cpu_data, cpu_label)
l_cpu.backward()
gpu_data = data.copyto(mx.gpu(0))
gpu_data.attach_grad()
with mx.autograd.record():
l_gpu = loss(gpu_data, gpu_label)
l_gpu.backward()
assert_almost_equal(cpu_data.grad.asnumpy(), gpu_data.grad.asnumpy(), atol=1e-3, rtol=1e-3)
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
def test_global_norm_clip_multi_device():
x1 = mx.nd.ones((3,3), ctx=mx.gpu(0))
x2 = mx.nd.ones((4,4), ctx=mx.cpu(0))
norm = gluon.utils.clip_global_norm([x1, x2], 1.0)
assert norm == 5.0
assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5)
assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5)
@with_seed()
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.asnumpy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad.asnumpy())
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
if __name__ == '__main__':
import nose
nose.runmodule()
callbacks_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import save_options as save_options_lib
from tensorflow.python.training import adam
from tensorflow.python.training.saving import checkpoint_options as checkpoint_options_lib
from tensorflow.python import ipu
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
method_counts: dict. Contains the number of times each callback method was
run.
"""
def __init__(self): # pylint: disable=super-init-not-called
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
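# Typical use, as exercised by the tests below: pass a Counter via the callbacks argument
# and assert on its method_counts afterwards, e.g.
#   counter = Counter()
#   model.evaluate(x, y, batch_size=2, steps=5, callbacks=[counter])
#   assert counter.method_counts['on_test_begin'] == 1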
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
@keras_parameterized.run_with_all_model_types(exclude_models='subclass')
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
class CallbackCountsTest(keras_parameterized.TestCase):
def setUp(self):
super(CallbackCountsTest, self).setUp()
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.ipu_model.compile_ipu_code = False
cfg.ipu_model.tiles_per_ipu = 1
cfg.configure_ipu_system()
self._ipu_strategy = ipu.ipu_strategy.IPUStrategyV1()
self._ipu_strategy_scope = self._ipu_strategy.scope()
self._ipu_strategy_scope.__enter__()
def tearDown(self):
self._ipu_strategy_scope.__exit__(None, None, None)
super(CallbackCountsTest, self).tearDown()
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count,
counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
def test_callback_hooks_are_called_in_evaluate(self):
x, y = _get_numpy()
model = self._get_model()
counter = Counter()
model.evaluate(x, y, batch_size=2, steps=5, callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
def test_callback_hooks_are_called_in_predict(self):
x = _get_numpy()[0]
model = self._get_model()
counter = Counter()
model.predict(x, batch_size=2, steps=5, callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(keras_parameterized.TestCase):
def setUp(self):
super(KerasCallbacksTest, self).setUp()
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.ipu_model.compile_ipu_code = False
cfg.ipu_model.tiles_per_ipu = 1
cfg.configure_ipu_system()
self._ipu_strategy = ipu.ipu_strategy.IPUStrategyV1()
self._ipu_strategy_scope = self._ipu_strategy.scope()
self._ipu_strategy_scope.__enter__()
def tearDown(self):
self._ipu_strategy_scope.__exit__(None, None, None)
super(KerasCallbacksTest, self).tearDown()
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers,
input_shape=input_shape)
model.compile(loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types(exclude_models='subclass')
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((200, 3))
y = array_ops.zeros((200, 2))
dataset = dataset_ops.Dataset.from_tensor_slices(
(x, y)).batch(10, drop_remainder=True)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_callback_warning(self):
class SleepCallback(keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
time.sleep(0.1)
model = sequential.Sequential()
model.add(keras.layers.Dense(1))
model.compile('sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with test.mock.patch.object(logging, 'warning', warning):
model.fit(np.ones((16, 1), 'float32'),
np.ones((16, 1), 'float32'),
batch_size=1,
epochs=1,
callbacks=[SleepCallback()])
warning_msg = ('Callback method `on_train_batch_end` is slow compared '
'to the batch time')
self.assertIn(warning_msg, '\n'.join(warning_messages))
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_default_callbacks_no_warning(self):
# Test that without the callback no warning is raised
model = sequential.Sequential()
model.add(keras.layers.Dense(1))
model.compile('sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with test.mock.patch.object(logging, 'warning', warning):
model.fit(np.ones((16, 1), 'float32'),
np.ones((16, 1), 'float32'),
batch_size=1,
epochs=1)
self.assertListEqual(warning_messages, [])
@keras_parameterized.run_with_all_model_types(
exclude_models=['subclass', 'functional'])
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((200, 3))
y = array_ops.zeros((200, 2))
dataset = dataset_ops.Dataset.from_tensor_slices(
(x, y)).batch(10, drop_remainder=True)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='subclass')
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_progbar_logging_validation_data(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
training_dataset = dataset_ops.Dataset.from_tensor_slices(
(x, y)).batch(10, drop_remainder=True)
val_dataset = dataset_ops.Dataset.from_tensor_slices(
(x, y)).batch(10, drop_remainder=True)
expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(training_dataset, epochs=2, validation_data=val_dataset)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='subclass')
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_progbar_logging_validation_split(self):
model = self._get_model(input_shape=(3,))
x = np.ones((100, 3))
y = np.zeros((100, 2))
expected_log = (
r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='subclass')
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_progbar_logging_training_validation(self):
model = self._get_model(input_shape=(2,))
def generator():
for _ in range(100):
yield [1, 1], 1
training = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2, drop_remainder=True) \
.repeat()
validation = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2, drop_remainder=True)
expected_log = (
r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x=training,
validation_data=validation,
epochs=2,
steps_per_epoch=20,
validation_steps=10)
self.assertRegex(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='subclass')
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
mode=mode,
save_freq='epoch',
period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 7: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=15,
period=100) # The period should be ignored (this test tests this).
]
assert not os.path.exists(filepath.format(epoch=3))
model.fit(x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegex(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
# Case 9: `ModelCheckpoint` with valid and invalid `options` argument.
with self.assertRaisesRegex(TypeError, 'tf.train.CheckpointOptions'):
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
options=save_options_lib.SaveOptions())
with self.assertRaisesRegex(TypeError, 'tf.saved_model.SaveOptions'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=False,
mode=mode,
options=checkpoint_options_lib.CheckpointOptions())
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=True,
mode=mode,
options=checkpoint_options_lib.CheckpointOptions())
keras.callbacks.ModelCheckpoint(filepath,
monitor=monitor,
save_best_only=save_best_only,
save_weights_only=False,
mode=mode,
options=save_options_lib.SaveOptions())
def _get_dummy_resource_for_model_checkpoint_testing(self):
def get_input_datasets():
# Simple training input.
train_input = [[1.]] * 16
train_label = [[0.]] * 16
ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(testing_utils.Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(filepath=filepath,
save_weights_only=True)
return model, train_ds, callback, filepath
def _run_load_weights_on_restart_test_common_iterations(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
initial_epochs = 3
model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
# The files should exist after fitting with callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath), # pylint: disable=protected-access
filepath.format(epoch=initial_epochs))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch) = \
self._run_load_weights_on_restart_test_common_iterations() # pylint: disable=protected-access
      # Sleep for a short period to ensure the files are created with a
      # different timestamp (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
model.fit(train_ds, epochs=1, callbacks=[callback])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath), # pylint: disable=protected-access
filepath.format(epoch=1))
model.fit(train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_with_one_final_extra_epoch = model.get_weights()
      # Assert that the weights after one more epoch of fitting and the weights
      # after restoring from the checkpoint and fitting one more epoch are close,
      # because a ModelCheckpoint with load_weights_on_restart=True restores the
      # model at the beginning of training.
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
self.assertNotAllClose(weights_after_one_more_epoch,
weights_with_one_final_extra_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(
save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch) = \
self._run_load_weights_on_restart_test_common_iterations() # pylint: disable=protected-access
model.fit(train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
      # Assert that the weights after one more epoch of fitting and the weights
      # after fitting one more epoch with the callback are different, because a
      # ModelCheckpoint with load_weights_on_restart=False does not restore the
      # model at the beginning of training.
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
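  # The four assignments above call the staticmethod factories at class-definition
  # time via `.__func__` (a staticmethod object is not directly callable inside the
  # class body on the Python versions this file targets), generating one test
  # method per combination of load_weights_on_restart and save_weights_only.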
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath, _) = \
self._run_load_weights_on_restart_test_common_iterations() # pylint: disable=protected-access
# Sleep for some short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(filepath=filepath,
save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath)) # pylint: disable=protected-access
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath)) # pylint: disable=protected-access
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
(model, train_ds, callback,
_) = self._get_dummy_resource_for_model_checkpoint_testing()
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ['localhost:23333']
},
'task': {
'type': 'worker',
'index': 0
}
})
# `model.fit()` should work regardless of the presence of `TF_CONFIG`.
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'temp.h5')
self.assertFalse(os.path.exists(filepath))
os.mkdir(filepath)
self.assertTrue(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegex(
IOError, 'Please specify a non-directory '
'filepath for ModelCheckpoint.'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_with_bad_path_placeholders(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5')
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegex(KeyError, 'Failed to format this callback '
'filepath.*'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_nonblocking(self):
filepath = self.get_temp_dir()
# Should only cause a sync block when saving is actually performed.
callback = keras.callbacks.ModelCheckpoint(filepath=filepath,
save_freq=100)
self.assertTrue(callback._supports_tf_logs) # pylint: disable=protected-access
model = keras.Sequential([keras.layers.Dense(1)])
cb_list = keras.callbacks.CallbackList([callback],
model=model,
epochs=1,
steps=10,
verbose=0)
tensor = ops.convert_to_tensor_v2_with_dispatch(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, ModelCheckpoint is causing a blocking '
'NumPy conversion even when not checkpointing.')
tensor.numpy = mock_numpy
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
cb_list.on_predict_begin(logs)
cb_list.on_predict_batch_begin(logs)
cb_list.on_predict_batch_end(logs)
cb_list.on_predict_end(logs)
def test_ProgbarLogger_verbose_2_nonblocking(self):
# Should only cause a sync block on epoch end methods.
callback = keras.callbacks.ProgbarLogger(count_mode='steps')
self.assertTrue(callback._supports_tf_logs) # pylint: disable=protected-access
model = keras.Sequential([keras.layers.Dense(1)])
cb_list = keras.callbacks.CallbackList([callback],
model=model,
epochs=1,
steps=10,
verbose=2)
tensor = ops.convert_to_tensor_v2_with_dispatch(1.)
def mock_numpy():
raise RuntimeError(
'If this error is seen, ModelCheckpoint is causing a blocking '
'NumPy conversion even when not checkpointing.')
tensor.numpy = mock_numpy
logs = {'metric': tensor}
cb_list.on_train_begin(logs)
cb_list.on_epoch_begin(0, logs)
cb_list.on_train_batch_begin(0, logs)
cb_list.on_train_batch_end(0, logs)
cb_list.on_test_begin(logs)
cb_list.on_test_batch_begin(0, logs)
cb_list.on_test_batch_end(0, logs)
cb_list.on_test_end(logs)
with self.assertRaisesRegex(RuntimeError, 'NumPy conversion'):
# on_epoch_end should still block.
cb_list.on_epoch_end(0, logs)
cb_list.on_train_end(logs)
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(num_hidden=NUM_HIDDEN,
num_classes=NUM_CLASSES,
input_dim=INPUT_DIM)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
cases = [('max', 'val_acc'), ('min', 'val_loss'), ('auto', 'val_acc'),
('auto', 'loss'), ('unknown', 'unknown')]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(patience=patience,
monitor=monitor,
mode=mode)
]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((128, 1))
labels = np.where(data > 0.5, 1, 0).astype(np.int32)
model = keras.models.Sequential((
keras.layers.Dense(1, input_dim=1, activation='relu'),
keras.layers.Dense(1, activation='sigmoid'),
))
model.compile(optimizer='sgd',
loss='binary_crossentropy',
metrics=['accuracy'])
weights = model.get_weights()
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
stopper = keras.callbacks.EarlyStopping(monitor='accuracy',
patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.6
(data, labels), _ = testing_utils.get_test_data(train_samples=128,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
labels = labels.astype(np.int32)
model = testing_utils.get_small_sequential_mlp(num_hidden=1,
num_classes=1,
input_dim=1)
model.compile(optimizer='sgd',
loss='binary_crossentropy',
metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc', baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(num_hidden=NUM_HIDDEN,
num_classes=NUM_CLASSES,
input_dim=INPUT_DIM)
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (float(keras.backend.get_value(model.optimizer.lr)) -
0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (float(keras.backend.get_value(model.optimizer.lr)) -
0.01 / 4) < keras.backend.epsilon()
cbks = [
keras.callbacks.LearningRateScheduler(
lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)
(epoch))
]
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))
decayed_learning_rate = 0.01 * cosine_decay_np
assert (float(keras.backend.get_value(model.optimizer.lr)) -
decayed_learning_rate) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(num_hidden=NUM_HIDDEN,
num_classes=NUM_CLASSES,
input_dim=INPUT_DIM)
model.compile(loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(float(keras.backend.get_value(model.optimizer.lr)),
0.1,
atol=1e-4)
model = make_model()
      # This should reduce the LR after the first epoch (due to the high min_delta).
cbks = [
keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(float(keras.backend.get_value(model.optimizer.lr)),
0.01,
atol=1e-4)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegex(str(mock_log.call_args),
'`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(num_hidden=NUM_HIDDEN,
num_classes=NUM_CLASSES,
input_dim=INPUT_DIM)
model.compile(loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_TerminateOnNaN(self):
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(NUM_HIDDEN,
input_dim=INPUT_DIM,
activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
      # Start an arbitrary thread that should run during model
      # training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_callback_passed_floats(self):
class MyCallback(keras.callbacks.Callback):
def on_batch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_batch_end_called = True
def on_epoch_end(self, batch, logs=None): # pylint: disable=arguments-differ
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_epoch_end_called = True
x, y = np.ones((32, 1)), np.ones((32, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
callback = MyCallback()
model.fit(x, y, epochs=2, callbacks=[callback])
self.assertTrue(callback.on_batch_end_called)
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_implements_batch_hooks(self):
class MyCallbackWithBatchHooks(keras.callbacks.Callback):
def __init__(self): # pylint: disable=super-init-not-called
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
class MyCallbackWithoutBatchHooks(keras.callbacks.Callback):
def __init__(self): # pylint: disable=super-init-not-called
self.epochs = 0
def on_epoch_end(self, batch, logs=None): # pylint: disable=arguments-differ
del batch
self.epochs += 1
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallbackWithBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks) # pylint: disable=protected-access
self.assertTrue(cb_list._should_call_test_batch_hooks) # pylint: disable=protected-access
self.assertTrue(cb_list._should_call_predict_batch_hooks) # pylint: disable=protected-access
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallbackWithoutBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertLen(cb_list.callbacks, 1)
self.assertFalse(cb_list._should_call_train_batch_hooks) # pylint: disable=protected-access
self.assertFalse(cb_list._should_call_test_batch_hooks) # pylint: disable=protected-access
self.assertFalse(cb_list._should_call_predict_batch_hooks) # pylint: disable=protected-access
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_implements_batch_hooks_override(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self, should_run=True): # pylint: disable=super-init-not-called
self.should_run = should_run
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
def _implements_train_batch_hooks(self):
return self.should_run
def _implements_test_batch_hooks(self):
return self.should_run
def _implements_predict_batch_hooks(self):
return self.should_run
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallback(should_run=True)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks) # pylint: disable=protected-access
self.assertTrue(cb_list._should_call_test_batch_hooks) # pylint: disable=protected-access
self.assertTrue(cb_list._should_call_predict_batch_hooks) # pylint: disable=protected-access
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallback(should_run=False)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertFalse(cb_list._should_call_train_batch_hooks) # pylint: disable=protected-access
self.assertFalse(cb_list._should_call_test_batch_hooks) # pylint: disable=protected-access
self.assertFalse(cb_list._should_call_predict_batch_hooks) # pylint: disable=protected-access
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 0)
self.assertEqual(my_cb.test_batches, 0)
self.assertEqual(my_cb.predict_batches, 0)
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_default_callbacks_do_not_call_batch_hooks(self):
model = keras.Sequential([keras.layers.Dense(1)])
log_dir = self.get_temp_dir()
cb_list = keras.callbacks.CallbackList([
keras.callbacks.TensorBoard(log_dir, profile_batch=0),
keras.callbacks.ModelCheckpoint(log_dir),
],
add_progbar=True,
model=model,
verbose=2,
epochs=3)
self.assertLen(cb_list.callbacks, 3)
self.assertFalse(cb_list._should_call_train_batch_hooks) # pylint: disable=protected-access
self.assertFalse(cb_list._should_call_test_batch_hooks) # pylint: disable=protected-access
self.assertFalse(cb_list._should_call_predict_batch_hooks) # pylint: disable=protected-access
@keras_parameterized.run_all_keras_modes(always_skip_eager=True,
always_skip_v1=True)
def test_stop_training_batch_level(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self):
super(MyCallback, self).__init__()
self.batch_counter = 0
def on_train_batch_end(self, batch, logs=None):
self.batch_counter += 1
if batch == 2:
self.model.stop_training = True
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
x, y = np.ones((10, 10)), np.ones((10, 1))
my_cb = MyCallback()
# Will run 5 batches if `stop_training` doesn't work.
model.fit(x, y, batch_size=2, callbacks=[my_cb])
self.assertEqual(my_cb.batch_counter, 3)
if __name__ == '__main__':
test.main()
|
experiment.py
|
import os
import threading
import traceback
import yaml
import json
import helm
import benchmark
import logging
from common import load_profiles, Context
INVOCATIONS_DIR = "invocations"
METRICS_DIR = "metrics"
log = logging.getLogger('abm')
def run(context: Context, args: list):
"""
Runs a single benchmark defined by *args[0]*
:param args: a list that contains a single element, the path to a benchmark
configuration file.
:return: True if the benchmarks completed sucessfully. False otherwise.
"""
if len(args) == 0:
print("ERROR: No benchmarking configuration provided.")
return False
benchmark_path = args[0]
if not os.path.exists(benchmark_path):
print(f"ERROR: Benchmarking configuration not found {benchmark_path}")
return False
with open(benchmark_path, 'r') as f:
config = yaml.safe_load(f)
profiles = load_profiles()
# latch = CountdownLatch(len(config['cloud']))
threads = []
for cloud in config['cloud']:
if cloud not in profiles:
print(f"WARNING: No profile found for {cloud}")
continue
t = threading.Thread(target=run_on_cloud, args=(cloud, config))
threads.append(t)
print(f"Starting thread for {cloud}")
t.start()
print('Waiting for threads')
for t in threads:
t.join()
print('All threads have terminated.')
# if not set_active_profile(cloud):
# print(f"ERROR: Unable to set the profile for {cloud}")
# continue
# if lib.KUBECONFIG is None:
# print(f"ERROR: No kubeconfig set for {cloud}")
# continue
# print("------------------------")
# print(f"Benchmarking: {cloud}")
# for conf in config['job_configs']:
# job_conf_path = f"rules/{conf}.yml"
# if not helm.update([job_conf_path]):
# print(f"WARNING: job conf not found {conf}")
# continue
# for n in range(num_runs):
# history_name_prefix = f"{n} {cloud} {conf}"
# for workflow_conf in config['benchmark_confs']:
# benchmark.run([workflow_conf, history_name_prefix])
# for n in range(num_runs):
# print("------------------------")
# print(f"Benchmarking run #{n+1}")
# for cloud in config['cloud']:
# if cloud not in profiles:
# print(f"WARNING: no profile for instance {cloud}")
# continue
# if not set_active_profile(cloud):
# print(f"WARNING: unable to set {cloud} as the active profile")
# if lib.KUBECONFIG is None:
# print(f"WARNGING: no kubeconfig for instance {cloud}")
# continue
# for job_conf in config['job_configs']:
# job_conf_path = f"rules/{job_conf}.yml"
# if not helm.update([job_conf_path]):
# print(f"WARNING: job conf not found {job_conf}")
# continue
# history_name_prefix = f"Run {n} {job_conf}"
# for workflow_conf in config['workflow_conf']:
# workflow.run([workflow_conf, history_name_prefix])
def run_on_cloud(cloud: str, config: dict):
print("------------------------")
print(f"Benchmarking: {cloud}")
context = Context(cloud)
namespace = 'galaxy'
chart = 'anvil/galaxykubeman'
if 'galaxy' in config:
namespace = config['galaxy']['namespace']
chart = config['galaxy']['chart']
for conf in config['job_configs']:
if not helm.update(context, [f"rules/{conf}.yml", namespace, chart]):
log.warning(f"job configuration not found: rules/{conf}.yml")
continue
for n in range(config['runs']):
history_name_prefix = f"{n} {cloud} {conf}"
for workflow_conf in config['benchmark_confs']:
benchmark.run(context, workflow_conf, history_name_prefix, config['name'])
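# Illustrative only (not read by this module): a minimal benchmark configuration of
# the shape that run() and run_on_cloud() above appear to expect. The key names come
# from the config lookups in those functions; the cloud names and file paths below
# are placeholders.
EXAMPLE_BENCHMARK_CONFIG = """
name: example-benchmark
runs: 2
cloud:
  - aws
  - gcp
galaxy:
  namespace: galaxy
  chart: anvil/galaxykubeman
job_configs:
  - default
benchmark_confs:
  - benchmarks/example.yml
"""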
def test(context: Context, args: list):
print(context.GALAXY_SERVER)
if os.path.exists(args[0]):
with open(args[0]) as f:
data = yaml.safe_load(f)
print(data)
def parse_toolid(id: str) -> str:
parts = id.split('/')
return f"{parts[-2]},{parts[-1]}"
def summarize(context: Context, args: list):
"""
Parses all the files in the specified directory and prints metrics
as CSV to stdout
:param args[0]: The path to the directory containing metrics filees
:return: None
"""
separator = None
input_dir = None
for arg in args:
if arg in ['-t', '--tsv']:
separator = '\t'
elif arg in ['-c', '--csv']:
separator = ','
else:
input_dir = arg
if input_dir is None:
input_dir = 'metrics'
if separator is None:
separator = ','
row = [''] * 14
#print("Run,Cloud,Job Conf,Workflow,History,Inputs,Server,Tool,Tool Version,State,Slots,Memory,Runtime (Sec),CPU,Memory Limit (Bytes),Memory Max usage (Bytes),Memory Soft Limit")
print("Run,Cloud,Job Conf,Workflow,History,Inputs,Tool,Tool Version,State,Slots,Memory,Runtime (Sec),CPU,Memory Limit (Bytes),Memory Max usage (Bytes)")
for file in os.listdir(input_dir):
input_path = os.path.join(input_dir, file)
if not os.path.isfile(input_path) or not input_path.endswith('.json'):
continue
try:
with open(input_path, 'r') as f:
data = json.load(f)
row[0] = data['run']
row[1] = data['cloud']
row[2] = data['job_conf']
row[3] = data['workflow_id']
row[4] = data['history_id']
row[5] = data['inputs']
#row[6] = data['server'] if data['server'] is not None else 'https://iu1.usegvl.org/galaxy'
row[6] = parse_toolid(data['metrics']['tool_id'])
row[7] = data['metrics']['state']
add_metrics_to_row(data['metrics']['job_metrics'], row)
print(separator.join(row))
except Exception as e:
# Silently fail to allow the remainder of the table to be generated.
pass
def add_metrics_to_row(metrics_list: list, row: list):
accept_metrics = ['galaxy_slots', 'galaxy_memory_mb', 'runtime_seconds', 'cpuacct.usage','memory.limit_in_bytes', 'memory.max_usage_in_bytes'] #,'memory.soft_limit_in_bytes']
for job_metrics in metrics_list:
if job_metrics['name'] in accept_metrics:
index = accept_metrics.index(job_metrics['name'])
row[index + 8] = job_metrics['raw_value']
# row.append(job_metrics['raw_value'])
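# Illustrative only (not read by this module): the minimal JSON document that
# summarize()/add_metrics_to_row() above appear to expect in each metrics file.
# Field names are taken from the dictionary lookups in those functions; the values
# are placeholders.
EXAMPLE_METRICS_JSON = {
    'run': 0,
    'cloud': 'aws',
    'job_conf': 'default',
    'workflow_id': 'wf-0001',
    'history_id': 'hist-0001',
    'inputs': 'example-input',
    'metrics': {
        'tool_id': 'toolshed.g2.bx.psu.edu/repos/devteam/bwa/bwa/0.7.17.4',
        'state': 'ok',
        'job_metrics': [
            {'name': 'galaxy_slots', 'raw_value': '4'},
            {'name': 'runtime_seconds', 'raw_value': '120.0'},
        ],
    },
}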
|
process_task.py
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<v1ll4n>
Purpose: process_task for a task(ProcessMode)
Created: 2016/12/12
"""
import unittest
import time
import multiprocessing
import threading
import sys
import types
import warnings
from multiprocessing import Pipe
from pprint import pprint
import traceback
if sys.version.startswith('2'):
import exceptions
else:
from . import exceptions
def sleep_(num):
#pprint("~~~")
time.sleep(num)
#----------------------------------------------------------------------
def result_callback(result):
""""""
for i in result:
yield i
#----------------------------------------------------------------------
def testfun(num):
""""""
#print('UserFunc called!')
for i in range(6):
threading.Thread(target=sleep_, args=(num,)).start()
#print('SubProcess Called!')
time.sleep(0.4)
for i in range(5):
yield i
#pprint(threading.enumerate())
########################################################################
class ProcessTask(multiprocessing.Process):
""""""
#----------------------------------------------------------------------
def __init__(self, id, target, args=tuple(), kwargs={},
status_monitor_pipe=None, result_pipe=None,
result_hook_function=None,
threads_update_interval=0.0):
"""Constructor"""
multiprocessing.Process.__init__(self, name=id)
self._target = target
self.args = args
self.kwargs = kwargs
self._id = id
self._sub_threads_list = []
self._threads_update_interval = threads_update_interval
#
        # Build result
#
self._status_monitor_pipe = status_monitor_pipe
self._result_send_pipe = result_pipe
self._result_hook = result_hook_function
#self._init_timer()
#----------------------------------------------------------------------
def _init_timer(self):
""""""
self._threads_monitor = threading.Thread(name='update_subthreads_list',
target=self._deamon_check_threads)
self._threads_monitor.daemon = True
self._threads_monitor.start()
#----------------------------------------------------------------------
@property
def task_id(self):
""""""
return self._id
#----------------------------------------------------------------------
def run(self):
""""""
self._init_timer()
resultdict = {}
resultdict['state'] = False
resultdict['exception'] = ''
resultdict['result'] = ''
try:
#
# getting result and process result
#
result = self._target(*self.args, **self.kwargs)
if self._result_hook:
result = self._result_hook(result)
resultdict['state'] = True
#
# send back the result element
#
if isinstance(result, types.GeneratorType):
for i in result:
try:
resultdict['result'] = i
self._result_send_pipe.send(resultdict)
except Exception as e:
                        warnings.warn('[?] the result cannot be sent back!' + \
                                      '\n Because: \n' + \
traceback.format_exc())
else:
try:
resultdict['result'] = result
self._result_send_pipe.send(resultdict)
except Exception as e:
                    warnings.warn('[?] the result cannot be sent back!' + \
                                  '\n Because: \n' + \
traceback.format_exc())
except Exception as e:
resultdict['exception'] = traceback.format_exc()
self._result_send_pipe.send(resultdict)
#----------------------------------------------------------------------
def _enum_threads(self):
""""""
threads_list = threading.enumerate()
return threads_list
#----------------------------------------------------------------------
def _deamon_check_threads(self):
""""""
assert isinstance(self._threads_update_interval, (int, float))
while True:
#pprint('test')
self._sub_threads_list = None
self._sub_threads_list = self._enum_threads()
#print(len(self._sub_threads_list))
#print len(self._sub_threads_list)
threads_check_result = {}
threads_check_result['timestamp'] = time.time()
threads_check_result['from'] = self._id
for i in self._sub_threads_list:
threads_check_result[i.name] = i.is_alive()
#pprint(threads_check_result)
self._status_monitor_pipe.send(threads_check_result)
time.sleep(self._threads_update_interval)
##----------------------------------------------------------------------
#@property
#def subthreads_count(self):
#return len(self._sub_threads_list)
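# Result protocol sketch (descriptive only, derived from run() above): every message
# sent over `result_pipe` is a dict of the form
#     {'state': <bool>, 'exception': <traceback string or ''>, 'result': <one value>}
# and generator targets emit one such dict per yielded value, while the status pipe
# periodically receives {'timestamp': ..., 'from': <task id>, <thread name>: <alive?>}.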
########################################################################
class ProcessTaskTest(unittest.case.TestCase):
""""""
#----------------------------------------------------------------------
def print_bar(self):
""""""
print(('-'*64))
#----------------------------------------------------------------------
def print_end_bar(self):
""""""
print(('-'*30 + 'END' + '-'*31))
#----------------------------------------------------------------------
def test_basic_usage(self):
""""""
pipp, pipc = Pipe()
pips, pipr = Pipe()
self.print_bar()
print('Test Task Interface')
ret_process = ProcessTask(id='test-1', target=testfun, args=(5,),
status_monitor_pipe=pipc,
result_pipe=pips,
result_hook_function=result_callback)
ret_process.start()
print('Test get threads status')
time.sleep(1)
#print(ret_process.subthreads_count)
threads_status = pipp.recv()
self.assertIsInstance(threads_status, dict)
#print pipr.recv()
#print pipr.recv()
#print pipr.recv()
#print pipr.recv()
self.print_end_bar()
if __name__ == '__main__':
unittest.main()
|
estim.py
|
# Modified by Daniel Gomez-Sanchez: adding portableQueue dependency for MacOS compatibility, and opening bgzipped pileups
from portableQueue import Queue
import numpy as np
from prob_cond_true_freq import prob_cond_true_freq
from multiprocessing import Process, Lock
import parse_pileup as pp
import sys
class A:
"class A"
def __init__(self):
self.list_chro = []
self.list_pos = []
self.list_anc = []
self.list_der = []
self.list_u = []
#...
#...
#function that sorts a list of A objects by their list_pos[0]
def quickSort(L):
def trirap(L, g, d):
pivot = L[(g+d)//2].list_pos[0]
i = g
j = d
while True:
while L[i].list_pos[0]<pivot:
i+=1
while L[j].list_pos[0]>pivot:
j-=1
if i>j:
break
if i<j:
L[i], L[j] = L[j], L[i]
i+=1
j-=1
if g<j:
trirap(L,g,j)
if i<d:
trirap(L,i,d)
g=0
d=len(L)-1
trirap(L,g,d)
#...
#function that is parallelized
def process_estim(qinput,qoutput,lock,parser_parameters,pileup_prefix,n,p_neutral,ancestral):
    print('process starts')
pileup = pp.openPileup(pileup_prefix, 'r')
qualityEncoding = parser_parameters[0]
minQual = parser_parameters[1]
minCount = parser_parameters[2]
minCoverage = parser_parameters[3]
maxCoverage = parser_parameters[4]
#creation of the parser object
if ancestral == "provided":
parser = pp.Pileup_parser_provided(qualityEncoding,minQual,minCount,minCoverage,maxCoverage)
elif ancestral == "unknown":
parser = pp.Pileup_parser_folded(qualityEncoding,minQual,minCount,minCoverage,maxCoverage)
else:
parser = pp.Pileup_parser_ref(qualityEncoding,minQual,minCount,minCoverage,maxCoverage)
f = pp.Format()
for item in iter(qinput.get,'STOP'):
l = []
lock.acquire()
pileup.seek(item[0])
for i in range(item[1]):
l.append(pileup.readline())
#...
lock.release()
estim_tab = A()
for l_item in l:
parsed =parser.get_pileup_parser(l_item)
if parsed['valid'] == 1:
info = f.format('info',parsed)
unfolded = int(info.split()[7])
SE = np.fromstring(f.format('qual',parsed) , dtype=float, sep=' ')
votemp = np.fromstring(f.format('freq',parsed), dtype=int, sep=' ')
SEtemp = 10**(-SE/10)
estim_tab.list_chro.append(info.split()[0])
estim_tab.list_pos.append(int(info.split()[1]))
estim_tab.list_anc.append(info.split()[4])
estim_tab.list_der.append(info.split()[5])
if unfolded == 1:
estim_tab.list_u.append(np.argmax(p_neutral * prob_cond_true_freq(n,votemp,SEtemp,1)))
else:
estim_tab.list_u.append(np.argmax((p_neutral + p_neutral[::-1])* prob_cond_true_freq(n,votemp,SEtemp,1)))
Ltemp=len(estim_tab.list_u)-1
if estim_tab.list_u[Ltemp]>(n/2):
estim_tab.list_u[Ltemp]=n-estim_tab.list_u[Ltemp]
estim_tab.list_anc[Ltemp]=info.split()[5]
estim_tab.list_der[Ltemp]=info.split()[4]
#...
#...
#print '+1'
if len(estim_tab.list_u) != 0: #in case that all the lines parsed are not valid
qoutput.put(estim_tab)
#...
#...
    print('process stops')
pileup.close()
#...
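# Work-item sketch (descriptive only, derived from process_estim()/estimation()):
# each entry on task_queue is a [file_offset, n_lines] pair; a worker seek()s to the
# offset, reads n_lines pileup lines under the lock, and puts one populated `A`
# object per block on done_queue. One 'STOP' sentinel is queued per process, e.g.
#     task_queue.put([0, 10000])   # first block of 10000 lines
#     task_queue.put('STOP')       # one sentinel for each worker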
def estimation(parser_parameters, region, nProcess, n, prefix,p_neutral,pileup_prefix,ancestral):
lock = Lock()
task_queue = Queue()
done_queue = Queue()
block = 10000
pileup = pp.openPileup(pileup_prefix, 'r')
if region:
chro = region[0]
start = region[1]
end = region[2]
offset_default = pileup.tell()
pileup_line = pileup.readline()
a = pileup_line.split()[0]
while(a != chro):
offset_default = pileup.tell()
pileup_line = pileup.readline()
try:
a = pileup_line.split()[0]
except IndexError:
                #if the pileup_line can't be split, that's the end of the file
print ('ERROR : chro %s not found' % (chro))
sys.exit()
#...
#...
if start:
a = int(pileup_line.split()[1])
b = pileup_line.split()[0]
if a > end:
print ('ERROR : interval\'s positions not found.')
sys.exit()
#...
while a < start and b == chro:
offset_default = pileup.tell()
pileup_line = pileup.readline()
try:
a = int(pileup_line.split()[1])
b = pileup_line.split()[0]
except IndexError:
                    #if the pileup_line can't be split, that's the end of the file
print ('ERROR : interval\'s positions not found.')
#...
#...
if b != chro:
print ('ERROR : interval\'s positions not found.')
sys.exit()
#...
offset_table = [offset_default]
nbLine = 0
split_pileup = pileup_line.split()
while split_pileup[0] == chro:
if start:
if int(split_pileup[1]) > end:
break
#...
#...
nbLine += 1
if nbLine % block == 0:
offset_table.append(pileup.tell())
#...
pileup_line = pileup.readline()
split_pileup = pileup_line.split()
if len(split_pileup) == 0:
break
#...
#...
#...
else:
offset_table = [0]
nbLine = 0
pileup_line = pileup.readline()
while(pileup_line != ''): #if pileup_line == '', that's the end of the file
nbLine += 1
if nbLine % block == 0:
offset_table.append(pileup.tell())
#...
pileup_line = pileup.readline()
#...
#...
    #for each offset except the last one
for offset in offset_table[:-1]:
task_queue.put([offset,block])
#...
#management of the last line_block
if nbLine % block != 0:
task_queue.put([offset_table[-1],nbLine % block])
#...
for i in range(nProcess):
task_queue.put('STOP')
#...
for i in range(nProcess):
        p = Process(target=process_estim, args=(task_queue, done_queue, lock, parser_parameters, pileup_prefix, n, p_neutral, ancestral))
        p.start()
#...
while task_queue.qsize() != 0:
pass
#...
estim = []
for i in range(done_queue.qsize()):
estim.append(done_queue.get())
#...
if len(estim) > 1:
quickSort(estim)
#...
fic_estim = open(prefix + '.estim', 'w')
for item in estim:
for i in range(len(item.list_pos)):
fic_estim.write( item.list_chro[i] + ' ' + str(item.list_pos[i]) + ' ' + item.list_anc[i] + ' ' + item.list_der[i] + ' ' + str(item.list_u[i]) + '\n')
#...
#...
fic_estim.close()
#...
|
parallelize.py
|
"""
Author: Anastassios Dardas, PhD - Higher Education Specialist at Education & Research at Esri Canada
Date: Q4 - 2021
About: Perform parallel processing.
"""
from multiprocessing import Process, Pool, set_start_method, cpu_count
from tqdm import tqdm
class ParallelProcess:
def __init__(self, start_method, targeter, parameters, split_val):
"""
Use the Process function in multiprocessing to parallel process.
:params start_method: The method to initiate - typically in Linux -> "fork"; Windows -> "spawn".
:params targeter: The custom function that is to be parallel processed.
:params parameters: The parameters required in the custom function.
:params split_val: The list that is to be split into chunks.
"""
self._process(start_method=start_method,
targeter=targeter,
parameters=parameters,
split_val=split_val)
def _process(self, start_method, targeter, parameters, split_val):
"""
Initiate parallel processing.
:params start_method: The method to initiate - typically in Linux -> "fork"; Windows -> "spawn".
:params targeter: The custom function that is to be parallel processed.
:params parameters: The parameters required in the custom function.
:params split_val: The list that is to be split into chunks.
"""
set_start_method(method=start_method, force=True)
processes = []
for i in range(len(split_val)):
new_param = (split_val[i], ) + parameters[1:]  # swap the placeholder first parameter for this chunk
p = Process(target = targeter, args = new_param)
processes.append(p)
p.start()
for process in processes:
process.join()
class ParallelPool:
def __init__(self, start_method, partial_func, main_list):
"""
Use the Pool function in multiprocessing to parallel process.
:params start_method: The multiprocessing start method - typically "fork" on Linux and "spawn" on Windows.
:params partial_func: A custom partial function that takes most of the parameters of a custom function to be parallel processed.
:params main_list: A numpy array list that has been chunked into n number of cores.
"""
self._pool(start_method=start_method, partial_func=partial_func, main_list=main_list)
def _pool(self, start_method, partial_func, main_list):
"""
Initiate parallel processing.
:params start_method: The multiprocessing start method - typically "fork" on Linux and "spawn" on Windows.
:params partial_func: A custom partial function that takes most of the parameters of a custom function to be parallel processed.
:params main_list: A numpy array list that has been chunked into n number of cores.
"""
set_start_method(method=start_method, force=True)
with Pool(processes=cpu_count()) as p:
max_ = len(main_list)
with tqdm(total=max_) as pbar:
for i, _ in enumerate(p.imap_unordered(partial_func, main_list)):
pbar.update()
p.close()
p.join()
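# Minimal usage sketch (hypothetical worker and data, not part of the original
# module). ParallelProcess expects `parameters` whose first element is a
# placeholder that gets replaced by each chunk of `split_val`; ParallelPool
# expects a functools.partial that only needs the chunk as its remaining
# positional argument.
def _square_chunk(chunk, label):
    # Toy worker: print the squares of one chunk of numbers.
    print(label, [x * x for x in chunk])
if __name__ == "__main__":
    from functools import partial
    chunks = [list(range(i, i + 3)) for i in range(0, 12, 3)]
    # Process-based variant: one Process per chunk.
    ParallelProcess(start_method="spawn",
                    targeter=_square_chunk,
                    parameters=(None, "processed:"),
                    split_val=chunks)
    # Pool-based variant: imap_unordered over the chunks with a progress bar.
    ParallelPool(start_method="spawn",
                 partial_func=partial(_square_chunk, label="pooled:"),
                 main_list=chunks)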
|
test_external_step.py
|
import os
import tempfile
import time
import uuid
from threading import Thread
import pytest
from dagster import (
DynamicOut,
DynamicOutput,
Failure,
Field,
MetadataEntry,
ModeDefinition,
ResourceDefinition,
RetryPolicy,
RetryRequested,
String,
execute_pipeline,
execute_pipeline_iterator,
fs_io_manager,
job,
op,
pipeline,
reconstructable,
reexecute_pipeline,
resource,
solid,
)
from dagster.core.definitions.no_step_launcher import no_step_launcher
from dagster.core.events import DagsterEventType
from dagster.core.execution.api import create_execution_plan
from dagster.core.execution.context_creation_pipeline import PlanExecutionContextManager
from dagster.core.execution.plan.external_step import (
LocalExternalStepLauncher,
local_external_step_launcher,
step_context_to_step_run_ref,
step_run_ref_to_step_context,
)
from dagster.core.execution.plan.state import KnownExecutionState
from dagster.core.execution.retries import RetryMode
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.core.test_utils import instance_for_test
from dagster.utils import safe_tempfile_path, send_interrupt
from dagster.utils.merger import deep_merge_dicts, merge_dicts
RUN_CONFIG_BASE = {"solids": {"return_two": {"config": {"a": "b"}}}}
def make_run_config(scratch_dir, mode):
if mode in ["external", "request_retry"]:
step_launcher_resource_keys = ["first_step_launcher", "second_step_launcher"]
else:
step_launcher_resource_keys = ["second_step_launcher"]
return deep_merge_dicts(
RUN_CONFIG_BASE,
{
"resources": merge_dicts(
{"io_manager": {"config": {"base_dir": scratch_dir}}},
{
step_launcher_resource_key: {"config": {"scratch_dir": scratch_dir}}
for step_launcher_resource_key in step_launcher_resource_keys
},
),
},
)
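# For reference, make_run_config("/tmp/scratch", "internal_and_external") yields
# (roughly) the following merged config (illustration only; the tests below
# always build it through the helper):
#
#     {
#         "solids": {"return_two": {"config": {"a": "b"}}},
#         "resources": {
#             "io_manager": {"config": {"base_dir": "/tmp/scratch"}},
#             "second_step_launcher": {"config": {"scratch_dir": "/tmp/scratch"}},
#         },
#     }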
class RequestRetryLocalExternalStepLauncher(LocalExternalStepLauncher):
def launch_step(self, step_context):
if step_context.previous_attempt_count == 0:
raise RetryRequested()
else:
return super(RequestRetryLocalExternalStepLauncher, self).launch_step(step_context)
@resource(config_schema=local_external_step_launcher.config_schema)
def request_retry_local_external_step_launcher(context):
return RequestRetryLocalExternalStepLauncher(**context.resource_config)
def _define_failing_job(has_policy: bool, is_explicit: bool = True):
@op(
required_resource_keys={"step_launcher"},
retry_policy=RetryPolicy(max_retries=3) if has_policy else None,
)
def retry_op(context):
if context.retry_number < 3:
if is_explicit:
raise Failure(description="some failure description", metadata={"foo": 1.23})
else:
_ = "x" + 1
return context.retry_number
@job(
resource_defs={
"step_launcher": local_external_step_launcher,
"io_manager": fs_io_manager,
}
)
def retry_job():
retry_op()
return retry_job
def _define_retry_job():
return _define_failing_job(has_policy=True)
def _define_error_job():
return _define_failing_job(has_policy=False, is_explicit=False)
def _define_failure_job():
return _define_failing_job(has_policy=False)
def _define_dynamic_job(launch_initial, launch_final):
from typing import List
initial_launcher = (
local_external_step_launcher if launch_initial else ResourceDefinition.mock_resource()
)
final_launcher = (
local_external_step_launcher if launch_final else ResourceDefinition.mock_resource()
)
@op(required_resource_keys={"initial_launcher"}, out=DynamicOut(int))
def dynamic_outs():
for i in range(0, 3):
yield DynamicOutput(value=i, mapping_key=f"num_{i}")
@op
def increment(i):
return i + 1
@op(required_resource_keys={"final_launcher"})
def total(ins: List[int]):
return sum(ins)
@job(
resource_defs={
"initial_launcher": initial_launcher,
"final_launcher": final_launcher,
"io_manager": fs_io_manager,
}
)
def my_job():
all_incs = dynamic_outs().map(increment)
total(all_incs.collect())
return my_job
def _define_basic_job(launch_initial, launch_final):
initial_launcher = (
local_external_step_launcher if launch_initial else ResourceDefinition.mock_resource()
)
final_launcher = (
local_external_step_launcher if launch_final else ResourceDefinition.mock_resource()
)
@op(required_resource_keys={"initial_launcher"})
def op1():
return 1
@op(required_resource_keys={"initial_launcher"})
def op2():
return 2
@op(required_resource_keys={"final_launcher"})
def combine(a, b):
return a + b
@job(
resource_defs={
"initial_launcher": initial_launcher,
"final_launcher": final_launcher,
"io_manager": fs_io_manager,
}
)
def my_job():
combine(op1(), op2())
return my_job
def define_dynamic_job_all_launched():
return _define_dynamic_job(True, True)
def define_dynamic_job_first_launched():
return _define_dynamic_job(True, False)
def define_dynamic_job_last_launched():
return _define_dynamic_job(False, True)
def define_basic_job_all_launched():
return _define_basic_job(True, True)
def define_basic_job_first_launched():
return _define_basic_job(True, False)
def define_basic_job_last_launched():
return _define_basic_job(False, True)
def define_basic_pipeline():
@solid(required_resource_keys=set(["first_step_launcher"]), config_schema={"a": Field(str)})
def return_two(_):
return 2
@solid(required_resource_keys=set(["second_step_launcher"]))
def add_one(_, num):
return num + 1
@pipeline(
mode_defs=[
ModeDefinition(
"external",
resource_defs={
"first_step_launcher": local_external_step_launcher,
"second_step_launcher": local_external_step_launcher,
"io_manager": fs_io_manager,
},
),
ModeDefinition(
"internal_and_external",
resource_defs={
"first_step_launcher": no_step_launcher,
"second_step_launcher": local_external_step_launcher,
"io_manager": fs_io_manager,
},
),
ModeDefinition(
"request_retry",
resource_defs={
"first_step_launcher": request_retry_local_external_step_launcher,
"second_step_launcher": request_retry_local_external_step_launcher,
"io_manager": fs_io_manager,
},
),
]
)
def basic_pipeline():
add_one(return_two())
return basic_pipeline
def define_sleepy_pipeline():
@solid(
config_schema={"tempfile": Field(String)},
required_resource_keys=set(["first_step_launcher"]),
)
def sleepy_solid(context):
with open(context.solid_config["tempfile"], "w", encoding="utf8") as ff:
ff.write("yup")
start_time = time.time()
while True:
time.sleep(0.1)
if time.time() - start_time > 120:
raise Exception("Timed out")
@pipeline(
mode_defs=[
ModeDefinition(
"external",
resource_defs={
"first_step_launcher": local_external_step_launcher,
"io_manager": fs_io_manager,
},
),
]
)
def sleepy_pipeline():
sleepy_solid()
return sleepy_pipeline
def initialize_step_context(scratch_dir, instance):
pipeline_run = PipelineRun(
pipeline_name="foo_pipeline",
run_id=str(uuid.uuid4()),
run_config=make_run_config(scratch_dir, "external"),
mode="external",
)
recon_pipeline = reconstructable(define_basic_pipeline)
plan = create_execution_plan(recon_pipeline, pipeline_run.run_config, mode="external")
initialization_manager = PlanExecutionContextManager(
pipeline=recon_pipeline,
execution_plan=plan,
run_config=pipeline_run.run_config,
pipeline_run=pipeline_run,
instance=instance,
retry_mode=RetryMode.DISABLED,
)
for _ in initialization_manager.prepare_context():
pass
pipeline_context = initialization_manager.get_context()
step_context = pipeline_context.for_step(
plan.get_step_by_key("return_two"),
KnownExecutionState(),
)
return step_context
def test_step_context_to_step_run_ref():
with DagsterInstance.ephemeral() as instance:
step_context = initialize_step_context("", instance)
step = step_context.step
step_run_ref = step_context_to_step_run_ref(step_context)
assert step_run_ref.run_config == step_context.pipeline_run.run_config
assert step_run_ref.run_id == step_context.pipeline_run.run_id
rehydrated_step_context = step_run_ref_to_step_context(step_run_ref, instance)
rehydrated_step = rehydrated_step_context.step
assert rehydrated_step.pipeline_name == step.pipeline_name
assert rehydrated_step.step_inputs == step.step_inputs
assert rehydrated_step.step_outputs == step.step_outputs
assert rehydrated_step.kind == step.kind
assert rehydrated_step.solid_handle.name == step.solid_handle.name
assert rehydrated_step.logging_tags == step.logging_tags
assert rehydrated_step.tags == step.tags
def test_local_external_step_launcher():
with tempfile.TemporaryDirectory() as tmpdir:
with DagsterInstance.ephemeral() as instance:
step_context = initialize_step_context(tmpdir, instance)
step_launcher = LocalExternalStepLauncher(tmpdir)
events = list(step_launcher.launch_step(step_context))
event_types = [event.event_type for event in events]
assert DagsterEventType.STEP_START in event_types
assert DagsterEventType.STEP_SUCCESS in event_types
assert DagsterEventType.STEP_FAILURE not in event_types
@pytest.mark.parametrize("mode", ["external", "internal_and_external"])
def test_pipeline(mode):
with tempfile.TemporaryDirectory() as tmpdir:
result = execute_pipeline(
pipeline=reconstructable(define_basic_pipeline),
mode=mode,
run_config=make_run_config(tmpdir, mode),
)
assert result.result_for_solid("return_two").output_value() == 2
assert result.result_for_solid("add_one").output_value() == 3
@pytest.mark.parametrize(
"job_fn",
[
define_dynamic_job_all_launched,
define_dynamic_job_first_launched,
define_dynamic_job_last_launched,
],
)
def test_dynamic_job(job_fn):
with tempfile.TemporaryDirectory() as tmpdir:
with instance_for_test() as instance:
result = execute_pipeline(
pipeline=reconstructable(job_fn),
run_config={
"resources": {
"initial_launcher": {
"config": {"scratch_dir": tmpdir},
},
"final_launcher": {
"config": {"scratch_dir": tmpdir},
},
"io_manager": {"config": {"base_dir": tmpdir}},
}
},
instance=instance,
)
assert result.output_for_solid("total") == 6
@pytest.mark.parametrize(
"job_fn",
[
define_basic_job_all_launched,
define_basic_job_first_launched,
define_basic_job_last_launched,
],
)
def test_reexecution(job_fn):
with tempfile.TemporaryDirectory() as tmpdir:
run_config = {
"resources": {
"initial_launcher": {
"config": {"scratch_dir": tmpdir},
},
"final_launcher": {
"config": {"scratch_dir": tmpdir},
},
"io_manager": {"config": {"base_dir": tmpdir}},
}
}
with instance_for_test() as instance:
run1 = execute_pipeline(
pipeline=reconstructable(job_fn),
run_config=run_config,
instance=instance,
)
assert run1.success
assert run1.result_for_solid("combine").output_value() == 3
run2 = reexecute_pipeline(
pipeline=reconstructable(job_fn),
parent_run_id=run1.run_id,
run_config=run_config,
instance=instance,
step_selection=["combine"],
)
assert run2.success
assert run2.result_for_solid("combine").output_value() == 3
def test_retry_policy():
with tempfile.TemporaryDirectory() as tmpdir:
run_config = {
"resources": {
"step_launcher": {"config": {"scratch_dir": tmpdir}},
"io_manager": {"config": {"base_dir": tmpdir}},
}
}
with instance_for_test() as instance:
run = execute_pipeline(
pipeline=reconstructable(_define_retry_job),
run_config=run_config,
instance=instance,
)
assert run.success
assert run.result_for_solid("retry_op").output_value() == 3
step_retry_events = [
e for e in run.event_list if e.event_type_value == "STEP_RESTARTED"
]
assert len(step_retry_events) == 3
def test_explicit_failure():
with tempfile.TemporaryDirectory() as tmpdir:
run_config = {
"resources": {
"step_launcher": {"config": {"scratch_dir": tmpdir}},
"io_manager": {"config": {"base_dir": tmpdir}},
}
}
with instance_for_test() as instance:
run = execute_pipeline(
pipeline=reconstructable(_define_failure_job),
run_config=run_config,
instance=instance,
raise_on_error=False,
)
fd = run.result_for_solid("retry_op").failure_data
assert fd.user_failure_data.description == "some failure description"
assert fd.user_failure_data.metadata_entries == [
MetadataEntry.float(label="foo", value=1.23)
]
def test_arbitrary_error():
with tempfile.TemporaryDirectory() as tmpdir:
run_config = {
"resources": {
"step_launcher": {"config": {"scratch_dir": tmpdir}},
"io_manager": {"config": {"base_dir": tmpdir}},
}
}
with instance_for_test() as instance:
run = execute_pipeline(
pipeline=reconstructable(_define_error_job),
run_config=run_config,
instance=instance,
raise_on_error=False,
)
failure_events = [e for e in run.event_list if e.event_type_value == "STEP_FAILURE"]
assert len(failure_events) == 1
fd = run.result_for_solid("retry_op").failure_data
assert fd.error.cause.cls_name == "TypeError"
def test_launcher_requests_retry():
mode = "request_retry"
with tempfile.TemporaryDirectory() as tmpdir:
result = execute_pipeline(
pipeline=reconstructable(define_basic_pipeline),
mode=mode,
run_config=make_run_config(tmpdir, mode),
)
assert result.success
assert result.result_for_solid("return_two").output_value() == 2
assert result.result_for_solid("add_one").output_value() == 3
for step_key, events in result.events_by_step_key.items():
if step_key:
event_types = [event.event_type for event in events]
assert DagsterEventType.STEP_UP_FOR_RETRY in event_types
assert DagsterEventType.STEP_RESTARTED in event_types
def _send_interrupt_thread(temp_file):
while not os.path.exists(temp_file):
time.sleep(0.1)
send_interrupt()
@pytest.mark.parametrize("mode", ["external"])
def test_interrupt_step_launcher(mode):
with tempfile.TemporaryDirectory() as tmpdir:
with safe_tempfile_path() as success_tempfile:
sleepy_run_config = {
"resources": {
"first_step_launcher": {
"config": {"scratch_dir": tmpdir},
},
"io_manager": {"config": {"base_dir": tmpdir}},
},
"solids": {"sleepy_solid": {"config": {"tempfile": success_tempfile}}},
}
interrupt_thread = Thread(target=_send_interrupt_thread, args=(success_tempfile,))
interrupt_thread.start()
results = []
for result in execute_pipeline_iterator(
pipeline=reconstructable(define_sleepy_pipeline),
mode=mode,
run_config=sleepy_run_config,
):
results.append(result.event_type)
assert DagsterEventType.STEP_FAILURE in results
assert DagsterEventType.PIPELINE_FAILURE in results
interrupt_thread.join()
def test_multiproc_launcher_requests_retry():
mode = "request_retry"
with tempfile.TemporaryDirectory() as tmpdir:
run_config = make_run_config(tmpdir, mode)
run_config["execution"] = {"multiprocess": {}}
result = execute_pipeline(
instance=DagsterInstance.local_temp(tmpdir),
pipeline=reconstructable(define_basic_pipeline),
mode=mode,
run_config=run_config,
)
assert result.success
assert result.result_for_solid("return_two").output_value() == 2
assert result.result_for_solid("add_one").output_value() == 3
for step_key, events in result.events_by_step_key.items():
if step_key:
event_types = [event.event_type for event in events]
assert DagsterEventType.STEP_UP_FOR_RETRY in event_types
assert DagsterEventType.STEP_RESTARTED in event_types
|
pygments_style.py
|
"""Display a "Color Styles" menu."""
from __future__ import annotations
import threading
import tkinter
from typing import List, Optional, Tuple
from pygments import styles, token # type: ignore[import]
from porcupine import get_main_window, get_tab_manager, menubar, settings, utils
def get_colors(style_name: str) -> Tuple[str, str]:
style = styles.get_style_by_name(style_name)
bg: str = style.background_color
# styles have a style_for_token() method, but only iterating
# is documented :( http://pygments.org/docs/formatterdevelopment/
# i'm using iter() to make sure that dict() really treats
# the style as an iterable of pairs instead of some other
# metaprogramming fanciness
fg: Optional[str] = None
style_infos = dict(iter(style))
for tokentype in [token.String, token.Text]:
if style_infos[tokentype]["color"] is not None:
fg = "#" + style_infos[tokentype]["color"]
break
if fg is None:
# do like textwidget.use_pygments_theme does
fg = getattr(style, "default_style", "") or utils.invert_color(bg)
return (fg, bg)
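# Example (hypothetical style name; the exact colors depend on the installed
# Pygments version):
#
#     fg, bg = get_colors("monokai")   # e.g. ("#e6db74", "#272822")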
# threading this gives a significant speed improvement on startup
# on this system, setup() took 0.287940 seconds before adding threads
# and 0.000371 seconds after adding threads
def load_style_names_to_list(target_list: List[str]) -> None:
target_list.extend(styles.get_all_styles()) # slow
target_list.sort()
def setup() -> None:
style_names: List[str] = []
thread = threading.Thread(target=load_style_names_to_list, args=[style_names])
thread.daemon = True # i don't care wtf happens to this
thread.start()
def check_if_it_finished() -> None:
if thread.is_alive():
get_main_window().after(200, check_if_it_finished)
return
var = tkinter.StringVar(value=settings.get("pygments_style", str))
def settings2var(event: tkinter.Event[tkinter.Misc]) -> None:
var.set(settings.get("pygments_style", str))
def var2settings(*junk: str) -> None:
settings.set_("pygments_style", var.get())
# this doesn't recurse infinitely because <<SettingChanged:bla>>
# gets generated only when the setting actually changes
get_tab_manager().bind("<<SettingChanged:pygments_style>>", settings2var, add=True)
var.trace_add("write", var2settings)
for style_name in style_names:
fg, bg = get_colors(style_name)
menubar.get_menu("Color Styles").add_radiobutton(
label=style_name,
value=style_name,
variable=var,
foreground=fg,
background=bg,
# swapped colors
activeforeground=bg,
activebackground=fg,
)
get_main_window().after(200, check_if_it_finished)
|
test.py
|
#!/usr/bin/env python3
import json
import os
import requests
import tempfile
import time
import threading
import queue
import unittest
from multiprocessing import Process
from pathlib import Path
from unittest import mock
from websocket import ABNF
from websocket._exceptions import WebSocketConnectionClosedException
from selfdrive.athena import athenad
from selfdrive.athena.athenad import dispatcher
from selfdrive.athena.test_helpers import MockWebsocket, MockParams, MockApi, EchoSocket, with_http_server
from cereal import messaging
class TestAthenadMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.SOCKET_PORT = 45454
athenad.ROOT = tempfile.mkdtemp()
athenad.Params = MockParams
athenad.Api = MockApi
athenad.LOCAL_PORT_WHITELIST = set([cls.SOCKET_PORT])
def test_echo(self):
assert dispatcher["echo"]("bob") == "bob"
def test_getMessage(self):
with self.assertRaises(TimeoutError) as _:
dispatcher["getMessage"]("controlsState")
def send_thermal():
messaging.context = messaging.Context()
pub_sock = messaging.pub_sock("thermal")
start = time.time()
while time.time() - start < 1:
msg = messaging.new_message('thermal')
pub_sock.send(msg.to_bytes())
time.sleep(0.01)
p = Process(target=send_thermal)
p.start()
time.sleep(0.1)
try:
thermal = dispatcher["getMessage"]("thermal")
assert thermal['thermal']
finally:
p.terminate()
def test_listDataDirectory(self):
print(dispatcher["listDataDirectory"]())
@with_http_server
def test_do_upload(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
try:
item = athenad.UploadItem(path=fn, url="http://localhost:1238", headers={}, created_at=int(time.time()*1000), id='')
try:
athenad._do_upload(item)
except requests.exceptions.ConnectionError:
pass
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
resp = athenad._do_upload(item)
self.assertEqual(resp.status_code, 201)
finally:
os.unlink(fn)
@with_http_server
def test_uploadFileToUrl(self, host):
not_exists_resp = dispatcher["uploadFileToUrl"]("does_not_exist.bz2", "http://localhost:1238", {})
self.assertEqual(not_exists_resp, 404)
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
try:
resp = dispatcher["uploadFileToUrl"]("qlog.bz2", f"{host}/qlog.bz2", {})
self.assertEqual(resp['enqueued'], 1)
self.assertDictContainsSubset({"path": fn, "url": f"{host}/qlog.bz2", "headers": {}}, resp['item'])
self.assertIsNotNone(resp['item'].get('id'))
self.assertEqual(athenad.upload_queue.qsize(), 1)
finally:
athenad.upload_queue = queue.Queue()
os.unlink(fn)
@with_http_server
def test_upload_handler(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
athenad.upload_queue.put_nowait(item)
try:
now = time.time()
while time.time() - now < 5:
if athenad.upload_queue.qsize() == 0:
break
self.assertEqual(athenad.upload_queue.qsize(), 0)
finally:
end_event.set()
athenad.upload_queue = queue.Queue()
os.unlink(fn)
def test_cancelUpload(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id')
athenad.upload_queue.put_nowait(item)
dispatcher["cancelUpload"](item.id)
self.assertIn(item.id, athenad.cancelled_uploads)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
now = time.time()
while time.time() - now < 5:
if athenad.upload_queue.qsize() == 0 and len(athenad.cancelled_uploads) == 0:
break
self.assertEqual(athenad.upload_queue.qsize(), 0)
self.assertEqual(len(athenad.cancelled_uploads), 0)
finally:
end_event.set()
athenad.upload_queue = queue.Queue()
def test_listUploadQueue(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id')
athenad.upload_queue.put_nowait(item)
try:
items = dispatcher["listUploadQueue"]()
self.assertEqual(len(items), 1)
self.assertDictEqual(items[0], item._asdict())
finally:
athenad.upload_queue = queue.Queue()
@mock.patch('selfdrive.athena.athenad.create_connection')
def test_startLocalProxy(self, mock_create_connection):
end_event = threading.Event()
ws_recv = queue.Queue()
ws_send = queue.Queue()
mock_ws = MockWebsocket(ws_recv, ws_send)
mock_create_connection.return_value = mock_ws
echo_socket = EchoSocket(self.SOCKET_PORT)
socket_thread = threading.Thread(target=echo_socket.run)
socket_thread.start()
athenad.startLocalProxy(end_event, 'ws://localhost:1234', self.SOCKET_PORT)
ws_recv.put_nowait(b'ping')
try:
recv = ws_send.get(timeout=5)
assert recv == (b'ping', ABNF.OPCODE_BINARY), recv
finally:
# signal websocket close to athenad.ws_proxy_recv
ws_recv.put_nowait(WebSocketConnectionClosedException())
socket_thread.join()
def test_getSshAuthorizedKeys(self):
keys = dispatcher["getSshAuthorizedKeys"]()
self.assertEqual(keys, MockParams().params["GithubSshKeys"].decode('utf-8'))
def test_jsonrpc_handler(self):
end_event = threading.Event()
thread = threading.Thread(target=athenad.jsonrpc_handler, args=(end_event,))
thread.daemon = True
thread.start()
athenad.payload_queue.put_nowait(json.dumps({"method": "echo", "params": ["hello"], "jsonrpc": "2.0", "id": 0}))
try:
resp = athenad.response_queue.get(timeout=3)
self.assertDictEqual(resp.data, {'result': 'hello', 'id': 0, 'jsonrpc': '2.0'})
finally:
end_event.set()
thread.join()
if __name__ == '__main__':
unittest.main()
|
manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import random
import signal
import sys
import time
import zipfile
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union, cast
from setproctitle import setproctitle
from tabulate import tabulate
import airflow.models
from airflow.callbacks.callback_requests import CallbackRequest
from airflow.configuration import conf
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.models import DagModel, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.file import list_py_file_paths, might_contain_dag
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.net import get_hostname
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import provide_session
if TYPE_CHECKING:
import pathlib
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[float]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:param processor_timeout: How long to wait before timing out a DAG file processor
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:param pickle_dags: whether to pickle DAGs.
:param async_mode: Whether to start agent in async mode
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: Optional[multiprocessing.process.BaseProcess] = None
self._done: bool = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
Should only be used when the DAG file processor manager is launched in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
# If the manager died because of an error, we will notice and restart it
# when harvest_serialized_dags calls _heartbeat_manager.
pass
def get_callbacks_pipe(self) -> MultiprocessingConnection:
"""Returns the pipe for sending Callbacks to DagProcessorManager."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
return self._parent_signal_conn
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._async_mode:
raise RuntimeError("wait_until_finished should only be called in sync_mode")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
return
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode (which is the only time we call this function) we don't send this message from
# the Manager until all the running processors have finished
return
@staticmethod
def _run_processor_manager(
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
) -> None:
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
# TODO: This reloading should be removed when we fix our logging behaviour
# In case of "spawn" method of starting processes for multiprocessing, reinitializing of the
# SQLAlchemy engine causes extremely unexpected behaviour of messing with objects already loaded
# in a parent process (likely via resources shared in memory by the ORM libraries).
# This caused flaky tests in our CI for many months and has been discovered while
# iterating on https://github.com/apache/airflow/pull/19860
# The issue that describes the problem and possible remediation is
# at https://github.com/apache/airflow/issues/19934
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(
dag_directory,
max_runs,
processor_timeout,
signal_conn,
dag_ids,
pickle_dags,
async_mode,
)
processor_manager.start()
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr('dag_processing.manager_stalls')
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Has DagFileProcessorManager ended?"""
return self._done
@property
def all_files_processed(self):
"""Have all files been processed at least once?"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
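# Rough sketch of how a scheduler might drive the agent in sync mode (for
# orientation only; the real call sites live in the Airflow scheduler job):
#
#     agent = DagFileProcessorAgent(dag_directory, max_runs=1,
#                                   processor_timeout=timedelta(minutes=5),
#                                   dag_ids=None, pickle_dags=False,
#                                   async_mode=False)
#     agent.start()                      # spawns the DagFileProcessorManager
#     agent.run_single_parsing_loop()    # ask the manager for one parsing "loop"
#     agent.wait_until_finished()        # block until a DagParsingStat arrives
#     agent.terminate()
#     agent.end()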
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results to a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:param processor_timeout: How long to wait before timing out a DAG file processor
:param signal_conn: connection to communicate signal with processor agent.
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:param pickle_dags: whether to pickle DAGs.
:param async_mode: whether to start the manager in async mode
"""
def __init__(
self,
dag_directory: Union[str, "pathlib.Path"],
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool = True,
):
super().__init__()
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._signal_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[float] = None
# Set the signal conn to non-blocking mode, so that attempting to
# send when the buffer is full errors out, rather than hanging forever
# attempting to send (this is to avoid deadlocks!)
#
# Don't do this in sync_mode, as we _need_ the DagParsingStat sent to
# continue the scheduler
if self._async_mode:
os.set_blocking(self._signal_conn.fileno(), False)
self._parallelism = conf.getint('scheduler', 'parsing_processes')
if conf.get('core', 'sql_alchemy_conn').startswith('sqlite') and self._parallelism > 1:
self.log.warning(
"Because we cannot use more than 1 thread (parsing_processes = "
"%d) when using sqlite. So we set parallelism to 1.",
self._parallelism,
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler', 'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler', 'print_stats_interval')
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler', 'dag_dir_list_interval')
# Mapping file name and callbacks requests
self._callback_to_execute: Dict[str, List[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger('airflow.processor_manager')
self.waitables: Dict[Any, Union[MultiprocessingConnection, DagFileProcessorProcess]] = {
self._signal_conn: self._signal_conn,
}
def register_exit_signals(self):
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
# So that we ignore the debug dump signal, making it easier to send
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame):
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", '\n'.join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
return self._run_parsing_loop()
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
if self._signal_conn in ready:
agent_signal = self._signal_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB, which isn't a good practice).
# This shouldn't happen, as in sync mode poll should block
# forever. Let's be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
self._refresh_dag_dir()
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
try:
self._signal_conn.send(
DagParsingStat(
max_runs_reached,
all_files_processed,
)
)
except BlockingIOError:
# Try again next time around the loop!
# It is better to fail than to deadlock. This should
# "almost never happen" since the DagParsingStat object is
# small, and in async mode this stat is not actually _required_
# for normal operation (It only drives "max runs")
self.log.debug("BlockingIOError received trying to send DagParsingStat, ignoring")
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
# Callback has a higher priority over DAG Run scheduling
if request.full_filepath in self._file_path_queue:
# Remove file paths matching request.full_filepath from self._file_path_queue
# Since we are already going to use that filepath to run callback,
# there is no need to have same file path again in the queue
self._file_path_queue = [
file_path for file_path in self._file_path_queue if file_path != request.full_filepath
]
self._file_path_queue.insert(0, request.full_filepath)
def _refresh_dag_dir(self):
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
# Check if file path is a zipfile and get the full path of the python file.
# Without this, SerializedDagModel.remove_deleted_files would delete zipped dags.
# Likewise DagCode.remove_deleted_code
dag_filelocs = []
for fileloc in self._file_paths:
if zipfile.is_zipfile(fileloc):
with zipfile.ZipFile(fileloc) as z:
dag_filelocs.extend(
[
os.path.join(fileloc, info.filename)
for info in z.infolist()
if might_contain_dag(info.filename, True, z)
]
)
else:
dag_filelocs.append(fileloc)
SerializedDagModel.remove_deleted_dags(dag_filelocs)
DagModel.deactivate_deleted_dags(self._file_paths)
from airflow.models.dagcode import DagCode
DagCode.remove_deleted_code(dag_filelocs)
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed"""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(~errors.ImportError.filename.in_(self._file_paths))
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f'dag_processing.last_run.seconds_ago.{file_name}', seconds_ago)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr('dag_processing.processes')
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
last_duration = (last_finish_time - processor.start_time).total_seconds()
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=last_duration,
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
file_name = os.path.splitext(os.path.basename(processor.file_path))[0].replace(os.sep, '.')
Stats.timing(f'dag_processing.last_duration.{file_name}', last_duration)
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors"""
ready = multiprocessing.connection.wait(self.waitables.keys() - [self._signal_conn], timeout=0)
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = cast(DagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
@staticmethod
def _create_process(file_path, pickle_dags, dag_ids, callback_requests):
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
# Do not create a duplicate processor, i.e. a processor for the same filepath
if file_path in self._processors.keys():
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._create_process(
file_path, self._pickle_dags, self._dag_ids, callback_to_execute_for_file
)
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
def prepare_file_path_queue(self):
"""Generate more file paths to process. Result are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
# Sort the file paths by the parsing order mode
list_mode = conf.get("scheduler", "file_parsing_sort_mode")
files_with_mtime = {}
file_paths = []
is_mtime_mode = list_mode == "modified_time"
file_paths_recently_processed = []
for file_path in self._file_paths:
if is_mtime_mode:
try:
files_with_mtime[file_path] = os.path.getmtime(file_path)
except FileNotFoundError:
self.log.warning("Skipping processing of missing file: %s", file_path)
continue
file_modified_time = timezone.make_aware(datetime.fromtimestamp(files_with_mtime[file_path]))
else:
file_paths.append(file_path)
file_modified_time = None
# Find file paths that were recently processed to exclude them
# from being added to file_path_queue
# unless they were modified recently and parsing mode is "modified_time"
# in which case we don't honor "self._file_process_interval" (min_file_process_interval)
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
and not (is_mtime_mode and file_modified_time and (file_modified_time > last_finish_time))
):
file_paths_recently_processed.append(file_path)
# Sort file paths via last modified time
if is_mtime_mode:
file_paths = sorted(files_with_mtime, key=files_with_mtime.get, reverse=True)
elif list_mode == "alphabetical":
file_paths = sorted(file_paths)
elif list_mode == "random_seeded_by_host":
# Shuffle the list seeded by hostname so multiple schedulers can work on different
# set of files. Since we set the seed, the sort order will remain same per host
random.Random(get_hostname()).shuffle(file_paths)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
file_paths_to_exclude = set(file_paths_in_progress).union(
file_paths_recently_processed, files_paths_at_run_limit
)
# Do not convert the following list to set as set does not preserve the order
# and we need to maintain the order of file_paths for `[scheduler] file_parsing_sort_mode`
files_paths_to_queue = [
file_path for file_path in file_paths if file_path not in file_paths_to_exclude
]
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
self._file_path_queue.extend(files_paths_to_queue)
def _kill_timed_out_processors(self):
"""Kill any file processors that timeout to defend against process hangs."""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics about dag parsing summary
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
'dag_processing.import_errors', sum(stat.import_errors for stat in self._file_stats.values())
)
@property
def file_paths(self):
return self._file_paths
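# A standalone sketch of the queue-preparation idea used in
# prepare_file_path_queue above (hypothetical helper, not part of Airflow's
# API): skip files parsed within `min_interval` seconds unless they changed
# since their last parse, then order the remaining files newest-first by
# modification time, mirroring the "modified_time" sort mode.
def _order_dag_files_sketch(file_paths, last_finish_times, min_interval=30.0):
    import os
    import time
    now = time.time()
    candidates = []
    for path in file_paths:
        try:
            mtime = os.path.getmtime(path)
        except FileNotFoundError:
            # Missing files are skipped, as in prepare_file_path_queue.
            continue
        finished = last_finish_times.get(path)  # epoch seconds or None
        recently_parsed = finished is not None and now - finished < min_interval
        if recently_parsed and mtime <= finished:
            continue
        candidates.append((mtime, path))
    # Newest files first, matching the "modified_time" ordering above.
    return [path for _, path in sorted(candidates, reverse=True)]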
|
test_stack.py
|
# -*- encoding: utf-8 -*-
import os
import threading
import time
import timeit
import pytest
from ddtrace.vendor import six
from ddtrace.profiling import _nogevent
from ddtrace.profiling import collector
from ddtrace.profiling import profiler
from ddtrace.profiling import recorder
from ddtrace.profiling import _service
from ddtrace.profiling.collector import stack
from ddtrace.profiling.collector import _threading
from . import test_collector
TESTING_GEVENT = os.getenv("DD_PROFILE_TEST_GEVENT", False)
def func1():
return func2()
def func2():
return func3()
def func3():
return func4()
def func4():
return func5()
def func5():
return _nogevent.sleep(1)
def test_collect_truncate():
r = recorder.Recorder()
c = stack.StackCollector(r, nframes=5)
c.start()
func1()
while not r.events[stack.StackSampleEvent]:
pass
c.stop()
for e in r.events[stack.StackSampleEvent]:
if e.thread_name == "MainThread":
assert len(e.frames) <= c.nframes
break
else:
pytest.fail("Unable to find the main thread")
def test_collect_once():
r = recorder.Recorder()
s = stack.StackCollector(r)
s._init()
all_events = s.collect()
assert len(all_events) == 2
stack_events = all_events[0]
for e in stack_events:
if e.thread_name == "MainThread":
if TESTING_GEVENT and stack.FEATURES["gevent-tasks"]:
assert e.task_id > 0
assert e.task_name == e.thread_name
else:
assert e.task_id is None
assert e.task_name is None
assert e.thread_id > 0
assert len(e.frames) >= 1
assert e.frames[0][0].endswith(".py")
assert e.frames[0][1] > 0
assert isinstance(e.frames[0][2], str)
break
else:
pytest.fail("Unable to find MainThread")
def _fib(n):
if n == 1:
return 1
elif n == 0:
return 0
else:
return _fib(n - 1) + _fib(n - 2)
@pytest.mark.skipif(not stack.FEATURES["gevent-tasks"], reason="gevent-tasks not supported")
def test_collect_gevent_thread_task():
r = recorder.Recorder()
s = stack.StackCollector(r)
# Start some (green)threads
def _dofib():
for _ in range(10):
# spend some time in CPU so the profiler can catch something
_fib(28)
# Just make sure gevent switches threads/greenlets
time.sleep(0)
threads = []
with s:
for i in range(10):
t = threading.Thread(target=_dofib, name="TestThread %d" % i)
t.start()
threads.append(t)
for t in threads:
t.join()
for event in r.events[stack.StackSampleEvent]:
if event.thread_name == "MainThread" and event.task_id in {thread.ident for thread in threads}:
assert event.task_name.startswith("TestThread ")
# This test is not uber-reliable as it has timing issues; therefore, if we find one of our TestThreads
# with the correct info, we're happy enough to stop here.
break
else:
pytest.fail("No gevent thread found")
def test_max_time_usage():
r = recorder.Recorder()
with pytest.raises(ValueError):
stack.StackCollector(r, max_time_usage_pct=0)
def test_max_time_usage_over():
r = recorder.Recorder()
with pytest.raises(ValueError):
stack.StackCollector(r, max_time_usage_pct=200)
def test_ignore_profiler_single():
r, c, thread_id = test_collector._test_collector_collect(stack.StackCollector, stack.StackSampleEvent)
events = r.events[stack.StackSampleEvent]
assert thread_id not in {e.thread_id for e in events}
def test_no_ignore_profiler_single():
r, c, thread_id = test_collector._test_collector_collect(
stack.StackCollector, stack.StackSampleEvent, ignore_profiler=False
)
events = r.events[stack.StackSampleEvent]
assert thread_id in {e.thread_id for e in events}
class CollectorTest(collector.PeriodicCollector):
def collect(self):
_fib(20)
return []
def test_ignore_profiler_gevent_task(profiler):
# This test is particularly useful with gevent enabled: create a test collector that runs often and for long
# so we're sure the stack profiler would catch it, and check that it is ignored.
c = CollectorTest(profiler._profiler._recorder, interval=0.00001)
c.start()
events = profiler._profiler._recorder.events[stack.StackSampleEvent]
collector_thread_ids = {
col._worker.ident
for col in profiler._profiler._collectors
if (isinstance(col, collector.PeriodicCollector) and col.status == _service.ServiceStatus.RUNNING)
}
collector_thread_ids.add(c._worker.ident)
time.sleep(3)
c.stop()
assert collector_thread_ids.isdisjoint({e.task_id for e in events})
@pytest.mark.skipif(not stack.FEATURES["gevent-tasks"], reason="gevent-tasks not supported")
def test_not_ignore_profiler_gevent_task(monkeypatch):
monkeypatch.setenv("DD_PROFILING_API_TIMEOUT", "0.1")
monkeypatch.setenv("DD_PROFILING_IGNORE_PROFILER", "0")
p = profiler.Profiler()
p.start()
# This test is particularly useful with gevent enabled: create a test collector that runs often and for long
# so we're sure the stack profiler catches it; here the profiler is configured not to ignore it.
c = CollectorTest(p._profiler._recorder, interval=0.00001)
c.start()
events = p._profiler._recorder.events[stack.StackSampleEvent]
time.sleep(3)
c.stop()
p.stop()
assert c._worker.ident in {e.task_id for e in events}
def test_collect():
test_collector._test_collector_collect(stack.StackCollector, stack.StackSampleEvent)
def test_restart():
test_collector._test_restart(stack.StackCollector)
def test_repr():
test_collector._test_repr(
stack.StackCollector,
"StackCollector(status=<ServiceStatus.STOPPED: 'stopped'>, "
"recorder=Recorder(default_max_events=32768, max_events={}), min_interval_time=0.01, max_time_usage_pct=2.0, "
"nframes=64, ignore_profiler=True, tracer=None)",
)
def test_new_interval():
r = recorder.Recorder()
c = stack.StackCollector(r)
new_interval = c._compute_new_interval(1000000)
assert new_interval == 0.049
new_interval = c._compute_new_interval(2000000)
assert new_interval == 0.098
c = stack.StackCollector(r, max_time_usage_pct=10)
new_interval = c._compute_new_interval(200000)
assert new_interval == 0.01
new_interval = c._compute_new_interval(1)
assert new_interval == c.min_interval_time
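# A sketch of the arithmetic the assertions above appear to encode (an
# assumption about the implementation, not ddtrace's documented formula):
# the collector scales its sleep interval so that collection time stays at
# roughly `max_time_usage_pct` of wall time, clamped to `min_interval_time`.
def _expected_interval_sketch(used_time_ns, max_time_usage_pct=2.0, min_interval=0.01):
    used_time_s = used_time_ns / 1e9
    return max(min_interval, used_time_s * (100.0 / max_time_usage_pct - 1))
# _expected_interval_sketch(1_000_000) and _expected_interval_sketch(2_000_000)
# reproduce the 0.049 and 0.098 values asserted above (up to float rounding).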
# Function to use for stress-test of polling
MAX_FN_NUM = 30
FN_TEMPLATE = """def _f{num}():
return _f{nump1}()"""
for num in range(MAX_FN_NUM):
    # Both Python 2 and 3 use the same exec() call here, so no six.PY3 branch is needed.
    exec(FN_TEMPLATE.format(num=num, nump1=num + 1))
exec(
"""def _f{MAX_FN_NUM}():
try:
raise ValueError('test')
except Exception:
time.sleep(2)""".format(
MAX_FN_NUM=MAX_FN_NUM
)
)
def test_stress_threads():
NB_THREADS = 40
threads = []
for i in range(NB_THREADS):
t = threading.Thread(target=_f0) # noqa: E149,F821
t.start()
threads.append(t)
s = stack.StackCollector(recorder=recorder.Recorder())
number = 20000
s._init()
exectime = timeit.timeit(s.collect, number=number)
# Threads are fake threads with gevent, so result is actually for one thread, not NB_THREADS
exectime_per_collect = exectime / number
print("%.3f ms per call" % (1000.0 * exectime_per_collect))
print(
"CPU overhead for %d threads with %d functions long at %d Hz: %.2f%%"
% (
NB_THREADS,
MAX_FN_NUM,
1 / s.min_interval_time,
100 * exectime_per_collect / s.min_interval_time,
)
)
for t in threads:
t.join()
def test_stress_threads_run_as_thread():
NB_THREADS = 40
threads = []
for i in range(NB_THREADS):
t = threading.Thread(target=_f0) # noqa: E149,F821
t.start()
threads.append(t)
r = recorder.Recorder()
s = stack.StackCollector(recorder=r)
# This mainly checks that nothing bad happens when we collect a lot of threads and store the result in the Recorder
with s:
time.sleep(3)
assert r.events[stack.StackSampleEvent]
for t in threads:
t.join()
@pytest.mark.skipif(not stack.FEATURES["stack-exceptions"], reason="Stack exceptions not supported")
@pytest.mark.skipif(TESTING_GEVENT, reason="Test not compatible with gevent")
def test_exception_collection_threads():
NB_THREADS = 5
threads = []
for i in range(NB_THREADS):
t = threading.Thread(target=_f0) # noqa: E149,F821
t.start()
threads.append(t)
r, c, thread_id = test_collector._test_collector_collect(stack.StackCollector, stack.StackExceptionSampleEvent)
exception_events = r.events[stack.StackExceptionSampleEvent]
e = exception_events[0]
assert e.timestamp > 0
assert e.sampling_period > 0
assert e.thread_id in {t.ident for t in threads}
assert isinstance(e.thread_name, str)
assert e.frames == [("<string>", 5, "_f30")]
assert e.nframes == 1
assert e.exc_type == ValueError
for t in threads:
t.join()
@pytest.mark.skipif(not stack.FEATURES["stack-exceptions"], reason="Stack exceptions not supported")
def test_exception_collection():
r = recorder.Recorder()
c = stack.StackCollector(r)
with c:
try:
raise ValueError("hello")
except Exception:
_nogevent.sleep(1)
exception_events = r.events[stack.StackExceptionSampleEvent]
assert len(exception_events) >= 1
e = exception_events[0]
assert e.timestamp > 0
assert e.sampling_period > 0
assert e.thread_id == _nogevent.thread_get_ident()
assert e.thread_name == "MainThread"
assert e.frames == [(__file__, 327, "test_exception_collection")]
assert e.nframes == 1
assert e.exc_type == ValueError
@pytest.fixture
def tracer_and_collector(tracer):
r = recorder.Recorder()
c = stack.StackCollector(r, tracer=tracer)
c.start()
try:
yield tracer, c
finally:
c.stop()
def test_thread_to_span_thread_isolation(tracer_and_collector):
t, c = tracer_and_collector
root = t.start_span("root")
thread_id = _nogevent.thread_get_ident()
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root}
quit_thread = threading.Event()
span_started = threading.Event()
store = {}
def start_span():
store["span2"] = t.start_span("thread2")
span_started.set()
quit_thread.wait()
th = threading.Thread(target=start_span)
th.start()
span_started.wait()
if TESTING_GEVENT:
# We track *real* threads; gevent uses only one in this case
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root, store["span2"]}
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(th.ident) == set()
else:
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root}
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(th.ident) == {store["span2"]}
# Do not quit the thread before we test, otherwise the collector might clean up the thread from the list of spans
quit_thread.set()
th.join()
def test_thread_to_span_multiple(tracer_and_collector):
t, c = tracer_and_collector
root = t.start_span("root")
thread_id = _nogevent.thread_get_ident()
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root}
subspan = t.start_span("subtrace", child_of=root)
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {subspan}
subspan.finish()
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root}
root.finish()
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == set()
def test_thread_to_child_span_multiple_unknown_thread(tracer_and_collector):
t, c = tracer_and_collector
t.start_span("root")
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(3456789) == set()
def test_thread_to_child_span_clear(tracer_and_collector):
t, c = tracer_and_collector
root = t.start_span("root")
thread_id = _nogevent.thread_get_ident()
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root}
c._thread_span_links.clear_threads(set())
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == set()
def test_thread_to_child_span_multiple_more_children(tracer_and_collector):
t, c = tracer_and_collector
root = t.start_span("root")
thread_id = _nogevent.thread_get_ident()
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root}
subspan = t.start_span("subtrace", child_of=root)
subsubspan = t.start_span("subsubtrace", child_of=subspan)
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {subsubspan}
subsubspan2 = t.start_span("subsubtrace2", child_of=subspan)
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {subsubspan, subsubspan2}
# ⚠ subspan is not supposed to finish before its children, but the API authorizes it
# In that case, we would return also the root span as it's becoming a parent without children 🤷
subspan.finish()
assert c._thread_span_links.get_active_leaf_spans_from_thread_id(thread_id) == {root, subsubspan, subsubspan2}
def test_collect_span_ids(tracer_and_collector):
t, c = tracer_and_collector
span = t.start_span("foobar")
# This test will run forever if it fails. Don't make it fail.
while True:
try:
event = c.recorder.events[stack.StackSampleEvent].pop()
except IndexError:
# No event left or no event yet
continue
if span.trace_id in event.trace_ids and span.span_id in event.span_ids:
break
def test_collect_multiple_span_ids(tracer_and_collector):
t, c = tracer_and_collector
span = t.start_span("foobar")
child = t.start_span("foobar", child_of=span)
# This test will run forever if it fails. Don't make it fail.
while True:
try:
event = c.recorder.events[stack.StackSampleEvent].pop()
except IndexError:
# No event left or no event yet
continue
if child.trace_id in event.trace_ids and child.span_id in event.span_ids:
break
def test_stress_trace_collection(tracer_and_collector):
tracer, collector = tracer_and_collector
def _trace():
for _ in range(5000):
with tracer.trace("hello"):
time.sleep(0.001)
NB_THREADS = 30
threads = []
for i in range(NB_THREADS):
t = threading.Thread(target=_trace)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
@pytest.mark.skipif(TESTING_GEVENT, reason="Test not compatible with gevent")
def test_thread_time_cache():
tt = stack._ThreadTime()
lock = _nogevent.Lock()
lock.acquire()
t = _nogevent.Thread(target=lock.acquire)
t.start()
main_thread_id = threading.current_thread().ident
threads = [
main_thread_id,
t.ident,
]
cpu_time = tt(threads)
assert sorted(k[0] for k in cpu_time.keys()) == sorted([main_thread_id, t.ident])
assert all(t >= 0 for t in cpu_time.values())
cpu_time = tt(threads)
assert sorted(k[0] for k in cpu_time.keys()) == sorted([main_thread_id, t.ident])
assert all(t >= 0 for t in cpu_time.values())
if stack.FEATURES["cpu-time"]:
assert set(tt._get_last_thread_time().keys()) == set(
(pthread_id, _threading.get_thread_native_id(pthread_id)) for pthread_id in threads
)
lock.release()
threads = {
main_thread_id: _threading.get_thread_native_id(main_thread_id),
}
cpu_time = tt(threads)
assert sorted(k[0] for k in cpu_time.keys()) == sorted([main_thread_id])
assert all(t >= 0 for t in cpu_time.values())
if stack.FEATURES["cpu-time"]:
assert set(tt._get_last_thread_time().keys()) == set(
(pthread_id, _threading.get_thread_native_id(pthread_id)) for pthread_id in threads
)
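# Minimal usage sketch (not one of the test cases above): run the stack
# collector for a short while and inspect the recorded samples.
def _example_collect_briefly():
    r = recorder.Recorder()
    with stack.StackCollector(r):
        time.sleep(0.5)
    return r.events[stack.StackSampleEvent]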
|
core.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import multiprocessing
import os
import signal
import sys
import time
from ...actors import create_actor_pool
from ...cluster_info import StaticClusterDiscoverer
from ...config import options
from ...resource import cpu_count
from ...scheduler.service import SchedulerService
from ...session import new_session
from ...utils import get_next_port, kill_process_tree
from ...worker.service import WorkerService
from .distributor import gen_distributor
_mp_spawn_context = multiprocessing.get_context('spawn')
_local_cluster_clients = dict()
atexit.register(lambda: [v.stop() for v in list(_local_cluster_clients.values())])
class LocalDistributedCluster(object):
# at least 2 processes are required by both the scheduler and the worker
MIN_SCHEDULER_N_PROCESS = 2
MIN_WORKER_N_PROCESS = 2
def __init__(self, endpoint, n_process=None, scheduler_n_process=None,
worker_n_process=None, cuda_device=None, ignore_avail_mem=True,
shared_memory=None):
self._endpoint = endpoint
self._started = False
self._stopped = False
self._pool = None
self._scheduler_service = SchedulerService()
cuda_devices = [cuda_device] if cuda_device is not None else None
self._worker_service = WorkerService(ignore_avail_mem=ignore_avail_mem,
cache_mem_size=shared_memory,
cuda_devices=cuda_devices,
distributed=False)
self._scheduler_n_process, self._worker_n_process = \
self._calc_scheduler_worker_n_process(n_process,
scheduler_n_process,
worker_n_process)
@property
def pool(self):
return self._pool
@classmethod
def _calc_scheduler_worker_n_process(cls, n_process, scheduler_n_process, worker_n_process,
calc_cpu_count=cpu_count):
n_scheduler, n_worker = scheduler_n_process, worker_n_process
if n_scheduler is None and n_worker is None:
n_scheduler = cls.MIN_SCHEDULER_N_PROCESS
n_process = n_process if n_process is not None else calc_cpu_count() + n_scheduler
n_worker = max(n_process - n_scheduler, cls.MIN_WORKER_N_PROCESS)
elif n_scheduler is None or n_worker is None:
# only one of scheduler_n_process and worker_n_process was provided
if n_scheduler is None:
n_process = n_process if n_process is not None else calc_cpu_count()
n_scheduler = max(n_process - n_worker, cls.MIN_SCHEDULER_N_PROCESS)
else:
assert n_worker is None
n_process = n_process if n_process is not None else calc_cpu_count() + n_scheduler
n_worker = max(n_process - n_scheduler, cls.MIN_WORKER_N_PROCESS)
return n_scheduler, n_worker
def _make_sure_scheduler_ready(self, timeout=120):
check_start_time = time.time()
while True:
workers_meta = self._scheduler_service._resource_ref.get_workers_meta()
if not workers_meta:
# wait for worker to report status
self._pool.sleep(.5)
if time.time() - check_start_time > timeout: # pragma: no cover
raise TimeoutError('Check worker ready timed out.')
else:
break
def start_service(self):
if self._started:
return
self._started = True
# start plasma
self._worker_service.start_plasma()
# start actor pool
n_process = self._scheduler_n_process + self._worker_n_process
distributor = gen_distributor(self._scheduler_n_process, self._worker_n_process)
self._pool = create_actor_pool(self._endpoint, n_process, distributor=distributor)
discoverer = StaticClusterDiscoverer([self._endpoint])
# start scheduler first
self._scheduler_service.start(self._endpoint, discoverer, self._pool, distributed=False)
# start worker next
self._worker_service.start(self._endpoint, self._pool,
discoverer=discoverer,
process_start_index=self._scheduler_n_process)
# make sure scheduler is ready
self._make_sure_scheduler_ready()
def stop_service(self):
if self._stopped:
return
self._stopped = True
try:
self._scheduler_service.stop(self._pool)
self._worker_service.stop()
finally:
self._pool.stop()
def serve_forever(self):
try:
self._pool.join()
except KeyboardInterrupt:
pass
finally:
self.stop_service()
def __enter__(self):
self.start_service()
return self
def __exit__(self, *_):
self.stop_service()
def gen_endpoint(address):
port = None
tries = 5  # retry up to 5 times
for i in range(tries):
try:
port = get_next_port()
break
except SystemError:
if i < tries - 1:
continue
raise
return f'{address}:{port}'
def _start_cluster(endpoint, event, n_process=None, shared_memory=None, **kw):
options_dict = kw.pop('options', None) or {}
options.update(options_dict)
modules = kw.pop('modules', None) or []
for m in modules:
__import__(m, globals(), locals(), [])
cluster = LocalDistributedCluster(endpoint, n_process=n_process,
shared_memory=shared_memory, **kw)
cluster.start_service()
event.set()
try:
cluster.serve_forever()
finally:
cluster.stop_service()
def _start_cluster_process(endpoint, n_process, shared_memory, **kw):
event = _mp_spawn_context.Event()
kw = kw.copy()
kw['n_process'] = n_process
kw['shared_memory'] = shared_memory or '20%'
process = _mp_spawn_context.Process(
target=_start_cluster, args=(endpoint, event), kwargs=kw)
process.start()
while True:
event.wait(5)
if not event.is_set():
# service not started yet
continue
if not process.is_alive():
raise SystemError('New local cluster failed')
else:
break
return process
def _start_web(scheduler_address, ui_port, event, options_dict):
import gevent.monkey
gevent.monkey.patch_all(thread=False)
options.update(options_dict)
from ...web import MarsWeb
web = MarsWeb(None, ui_port, scheduler_address)
try:
web.start(event=event, block=True)
finally:
web.stop()
def _start_web_process(scheduler_endpoint, web_endpoint):
ui_port = int(web_endpoint.rsplit(':', 1)[1])
options_dict = options.to_dict()
web_event = _mp_spawn_context.Event()
web_process = _mp_spawn_context.Process(
target=_start_web, args=(scheduler_endpoint, ui_port, web_event, options_dict),
daemon=True)
web_process.start()
while True:
web_event.wait(5)
if not web_event.is_set():
# web not started yet
continue
if not web_process.is_alive():
raise SystemError('New web interface failed')
else:
break
return web_process
class LocalDistributedClusterClient(object):
def __init__(self, endpoint, web_endpoint, cluster_process, web_process):
self._cluster_process = cluster_process
self._web_process = web_process
self._endpoint = endpoint
self._web_endpoint = web_endpoint
self._session = new_session(endpoint).as_default()
@property
def endpoint(self):
return self._endpoint
@property
def web_endpoint(self):
return self._web_endpoint
@property
def session(self):
return self._session
def __enter__(self):
return self
def __exit__(self, *_):
self.stop()
@staticmethod
def _ensure_process_finish(proc):
if proc is None or not proc.is_alive():
return
proc.join(3)
kill_process_tree(proc.pid)
def stop(self):
try:
del _local_cluster_clients[id(self)]
except KeyError: # pragma: no cover
pass
if self._cluster_process.is_alive():
os.kill(self._cluster_process.pid, signal.SIGINT)
if self._web_process is not None and self._web_process.is_alive():
os.kill(self._web_process.pid, signal.SIGINT)
self._ensure_process_finish(self._cluster_process)
self._ensure_process_finish(self._web_process)
def new_cluster(address='0.0.0.0', web=False, n_process=None, shared_memory=None,
open_browser=None, **kw):
open_browser = open_browser if open_browser is not None else options.deploy.open_browser
endpoint = gen_endpoint(address)
web_endpoint = None
if web is True:
web_endpoint = gen_endpoint('0.0.0.0')
elif isinstance(web, str):
if ':' in web:
web_endpoint = web
else:
web_endpoint = gen_endpoint(web)
options_dict = options.to_dict()
options_dict.update(kw.get('options') or dict())
kw['options'] = options_dict
process = _start_cluster_process(endpoint, n_process, shared_memory, **kw)
web_process = None
if web_endpoint:
web_process = _start_web_process(endpoint, web_endpoint)
print(f'Web endpoint started at http://{web_endpoint}', file=sys.stderr)
if open_browser:
import webbrowser
webbrowser.open_new_tab(f'http://{web_endpoint}')
client = LocalDistributedClusterClient(endpoint, web_endpoint, process, web_process)
_local_cluster_clients[id(client)] = client
return client
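# Minimal usage sketch (not part of the module): start a local cluster, use
# the default session it registers, then shut everything down on exit.
def _example_new_cluster():
    with new_cluster(n_process=4) as client:
        # client.session is registered as the default Mars session here,
        # so Mars tensors/dataframes created in this block run on the cluster.
        print('scheduler endpoint:', client.endpoint)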
|
main.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import yaml
import argparse
import multiprocessing
from qlib_server.config import init, LoggingConfig
from qlib.log import get_module_logger, set_log_with_config
from qlib_server.log import log_subprocess_config, listener_process
# read config for qlib-server
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="config file path", default="./config.yaml")
parser.add_argument(
"-m",
"--module",
help="modules to run",
nargs="+",
choices=["request_handler", "data_processor"],
default=["request_handler", "data_processor"],
)
ARGS = parser.parse_args()
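# Example invocation (assuming a config.yaml in the working directory):
#   python main.py -c ./config.yaml -m request_handler data_processor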
# start qlib-server process
def main():
LOG = get_module_logger(__file__)
from qlib_server.request_handler import RequestHandler
from qlib_server.data_processor import DataProcessor
LOG.info("QLibServer starting...")
threads = []
if "request_handler" in ARGS.module:
threads.append(RequestHandler())
if "data_processor" in ARGS.module:
threads.append(DataProcessor())
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == "__main__":
with open(ARGS.config) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
# configure the root error logger to send errors via email
# configure the root logger to log through a queue
set_log_with_config(log_subprocess_config)
logger_config = config.get("logging_config", LoggingConfig["logging_config"])
stop_event = multiprocessing.Event()
log_process = multiprocessing.Process(target=listener_process, args=(stop_event, logger_config))
log_process.start()
init(config)
main()
stop_event.set()
log_process.join()
|
utils.py
|
# coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions/classes."""
import collections
import pickle
import threading
import time
import timeit
from absl import flags
from absl import logging
import gym
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.distribute import values as values_lib
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import tensor_conversion_registry
FLAGS = flags.FLAGS
# `observation` is the observation *after* a transition. When `done` is True,
# `observation` will be the observation *after* the reset.
EnvOutput = collections.namedtuple(
'EnvOutput', 'reward done observation abandoned episode_step')
Settings = collections.namedtuple(
'Settings', 'strategy inference_devices training_strategy encode decode')
MultiHostSettings = collections.namedtuple(
'MultiHostSettings', 'strategy hosts training_strategy encode decode')
def init_learner_multi_host(num_training_tpus: int):
"""Performs common learner initialization including multi-host setting.
In multi-host setting, this function will enter a loop for secondary learners
until the primary learner signals end of training.
Args:
num_training_tpus: Number of training TPUs.
Returns:
A MultiHostSettings object.
"""
tpu = ''
job_name = None
if tf.config.experimental.list_logical_devices('TPU'):
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=tpu, job_name=job_name)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
assert num_training_tpus % topology.num_tasks == 0
num_training_tpus_per_task = num_training_tpus // topology.num_tasks
hosts = []
training_coordinates = []
for per_host_coordinates in topology.device_coordinates:
host = topology.cpu_device_name_at_coordinates(
per_host_coordinates[0], job=job_name)
task_training_coordinates = (
per_host_coordinates[:num_training_tpus_per_task])
training_coordinates.extend([[c] for c in task_training_coordinates])
inference_coordinates = per_host_coordinates[num_training_tpus_per_task:]
hosts.append((host, [
topology.tpu_device_name_at_coordinates(c, job=job_name)
for c in inference_coordinates
]))
training_da = tf.tpu.experimental.DeviceAssignment(topology,
training_coordinates)
training_strategy = tf.distribute.experimental.TPUStrategy(
resolver, device_assignment=training_da)
return MultiHostSettings(strategy, hosts, training_strategy, tpu_encode,
tpu_decode)
else:
tf.device('/cpu').__enter__()
any_gpu = tf.config.experimental.list_logical_devices('GPU')
device_name = '/device:GPU:0' if any_gpu else '/device:CPU:0'
strategy = tf.distribute.OneDeviceStrategy(device=device_name)
enc = lambda x: x
dec = lambda x, s=None: x if s is None else tf.nest.pack_sequence_as(s, x)
return MultiHostSettings(
strategy, [('/cpu', [device_name])], strategy, enc, dec)
def init_learner(num_training_tpus):
"""Performs common learner initialization."""
settings = init_learner_multi_host(num_training_tpus)
if len(settings.hosts) != 1:
raise ValueError(f'Invalid number of hosts: {len(settings.hosts)}')
return Settings(settings.strategy, settings.hosts[0][1],
settings.training_strategy, settings.encode, settings.decode)
class UnrollStore(tf.Module):
"""Utility module for combining individual environment steps into unrolls."""
def __init__(self,
num_envs,
unroll_length,
timestep_specs,
num_overlapping_steps=0,
name='UnrollStore'):
super(UnrollStore, self).__init__(name=name)
with self.name_scope:
self._full_length = num_overlapping_steps + unroll_length + 1
def create_unroll_variable(spec):
z = tf.zeros(
[num_envs, self._full_length] + spec.shape.dims, dtype=spec.dtype)
return tf.Variable(z, trainable=False, name=spec.name)
self._unroll_length = unroll_length
self._num_overlapping_steps = num_overlapping_steps
self._state = tf.nest.map_structure(create_unroll_variable,
timestep_specs)
# For each environment, the index along the time dimension of the tensors
# in self._state where we should add the next element.
self._index = tf.Variable(
tf.fill([num_envs], tf.constant(num_overlapping_steps, tf.int32)),
trainable=False,
name='index')
@property
def unroll_specs(self):
return tf.nest.map_structure(lambda v: tf.TensorSpec(v.shape[1:], v.dtype),
self._state)
@tf.function
@tf.Module.with_name_scope
def append(self, env_ids, values):
"""Appends values and returns completed unrolls.
Args:
env_ids: 1D tensor with the list of environment IDs for which we append
data.
There must not be duplicates.
values: Values to add for each environment. This is a structure
(in the tf.nest sense) of tensors following "timestep_specs", with a
batch front dimension which must be equal to the length of 'env_ids'.
Returns:
A pair of:
- 1D tensor of the environment IDs of the completed unrolls.
- Completed unrolls. This is a structure of tensors following
'timestep_specs', with added front dimensions: [num_completed_unrolls,
num_overlapping_steps + unroll_length + 1].
"""
tf.debugging.assert_equal(
tf.shape(env_ids),
tf.shape(tf.unique(env_ids)[0]),
message=f'Duplicate environment ids in store {self.name}')
tf.nest.map_structure(
lambda s: tf.debugging.assert_equal(
tf.shape(env_ids)[0],
tf.shape(s)[0],
message=(f'Batch dimension must equal the number of environments '
f'in store {self.name}.')),
values)
curr_indices = self._index.sparse_read(env_ids)
unroll_indices = tf.stack([env_ids, curr_indices], axis=-1)
for s, v in zip(tf.nest.flatten(self._state), tf.nest.flatten(values)):
s.scatter_nd_update(unroll_indices, v)
# Intentionally not protecting against out-of-bounds to make it possible to
# detect completed unrolls.
self._index.scatter_add(tf.IndexedSlices(1, env_ids))
return self._complete_unrolls(env_ids)
@tf.function
@tf.Module.with_name_scope
def reset(self, env_ids):
"""Resets state.
Note, this is only intended to be called when environments need to be reset
after preemptions. Not at episode boundaries.
Args:
env_ids: The environments that need to have their state reset.
"""
self._index.scatter_update(
tf.IndexedSlices(self._num_overlapping_steps, env_ids))
# The following code is the equivalent of:
# s[env_ids, :j] = 0
j = self._num_overlapping_steps
repeated_env_ids = tf.reshape(
tf.tile(tf.expand_dims(tf.cast(env_ids, tf.int64), -1), [1, j]), [-1])
repeated_range = tf.tile(tf.range(j, dtype=tf.int64),
[tf.shape(env_ids)[0]])
indices = tf.stack([repeated_env_ids, repeated_range], axis=-1)
for s in tf.nest.flatten(self._state):
z = tf.zeros(tf.concat([tf.shape(repeated_env_ids),
s.shape[2:]], axis=0), s.dtype)
s.scatter_nd_update(indices, z)
def _complete_unrolls(self, env_ids):
# Environment with unrolls that are now complete and should be returned.
env_indices = self._index.sparse_read(env_ids)
env_ids = tf.gather(
env_ids,
tf.where(tf.equal(env_indices, self._full_length))[:, 0])
env_ids = tf.cast(env_ids, tf.int64)
unrolls = tf.nest.map_structure(lambda s: s.sparse_read(env_ids),
self._state)
# Store last transitions as the first in the next unroll.
# The following code is the equivalent of:
# s[env_ids, :j] = s[env_ids, -j:]
j = self._num_overlapping_steps + 1
repeated_start_range = tf.tile(tf.range(j, dtype=tf.int64),
[tf.shape(env_ids)[0]])
repeated_end_range = tf.tile(
tf.range(self._full_length - j, self._full_length, dtype=tf.int64),
[tf.shape(env_ids)[0]])
repeated_env_ids = tf.reshape(
tf.tile(tf.expand_dims(env_ids, -1), [1, j]), [-1])
start_indices = tf.stack([repeated_env_ids, repeated_start_range], -1)
end_indices = tf.stack([repeated_env_ids, repeated_end_range], -1)
for s in tf.nest.flatten(self._state):
s.scatter_nd_update(start_indices, s.gather_nd(end_indices))
self._index.scatter_update(
tf.IndexedSlices(1 + self._num_overlapping_steps, env_ids))
return env_ids, unrolls
class PrioritizedReplay(tf.Module):
"""Prioritized Replay Buffer.
This buffer is not threadsafe. Make sure you call insert() and sample() from a
single thread.
"""
def __init__(self, size, specs, importance_sampling_exponent,
name='PrioritizedReplay'):
super(PrioritizedReplay, self).__init__(name=name)
self._priorities = tf.Variable(tf.zeros([size]), dtype=tf.float32)
self._buffer = tf.nest.map_structure(
lambda ts: tf.Variable(tf.zeros([size] + ts.shape, dtype=ts.dtype)),
specs)
self.num_inserted = tf.Variable(0, dtype=tf.int64)
self._importance_sampling_exponent = importance_sampling_exponent
@tf.function
@tf.Module.with_name_scope
def insert(self, values, priorities):
"""FIFO insertion/removal.
Args:
values: The batched values to insert. The tensors must be of the same
shape and dtype as the `specs` provided in the constructor, except for
an added leading batch dimension.
priorities: <float32>[batch_size] tensor with the priorities of the
elements we insert.
Returns:
The indices of the inserted values.
"""
tf.nest.assert_same_structure(values, self._buffer)
values = tf.nest.map_structure(tf.convert_to_tensor, values)
append_size = tf.nest.flatten(values)[0].shape[0]
start_index = self.num_inserted
end_index = start_index + append_size
# Wrap around insertion.
size = self._priorities.shape[0]
insert_indices = tf.range(start_index, end_index) % size
tf.nest.map_structure(
lambda b, v: b.batch_scatter_update(
tf.IndexedSlices(v, insert_indices)),
self._buffer,
values)
self.num_inserted.assign_add(append_size)
self._priorities.batch_scatter_update(
tf.IndexedSlices(priorities, insert_indices))
return insert_indices
@tf.function
@tf.Module.with_name_scope
def sample(self, num_samples, priority_exp):
r"""Samples items from the replay buffer, using priorities.
Args:
num_samples: int, number of replay items to sample.
priority_exp: Priority exponent. Every item i in the replay buffer will be
sampled with probability:
priority[i] ** priority_exp /
sum(priority[j] ** priority_exp, j \in [0, num_items))
Set this to 0 in order to get uniform sampling.
Returns:
Tuple of:
- indices: An int64 tensor of shape [num_samples] with the indices in
the replay buffer of the sampled items.
- weights: A float32 tensor of shape [num_samples] with the normalized
weights of the sampled items.
- sampled_values: A nested structure following the spec passed in the
constructor, where each tensor has an added front batch dimension equal
to 'num_samples'.
"""
tf.debugging.assert_greater(
self.num_inserted,
tf.constant(0, tf.int64),
message='Cannot sample if replay buffer is empty')
size = self._priorities.shape[0]
limit = tf.minimum(tf.cast(size, tf.int64), self.num_inserted)
if priority_exp == 0:
indices = tf.random.uniform([num_samples], maxval=limit, dtype=tf.int64)
weights = tf.ones_like(indices, dtype=tf.float32)
else:
prob = self._priorities[:limit]**priority_exp
prob /= tf.reduce_sum(prob)
indices = tf.random.categorical([tf.math.log(prob)], num_samples)[0]
# Importance weights.
weights = (((1. / tf.cast(limit, tf.float32)) /
tf.gather(prob, indices)) **
self._importance_sampling_exponent)
weights /= tf.reduce_max(weights) # Normalize.
sampled_values = tf.nest.map_structure(
lambda b: b.sparse_read(indices), self._buffer)
return indices, weights, sampled_values
@tf.function
@tf.Module.with_name_scope
def update_priorities(self, indices, priorities):
"""Updates the priorities of the items with the given indices.
Args:
indices: <int64>[batch_size] tensor with the indices of the items to
update. If duplicate indices are provided, the priority that will be set
among possible ones is not specified.
priorities: <float32>[batch_size] tensor with the new priorities.
"""
self._priorities.batch_scatter_update(tf.IndexedSlices(priorities, indices))
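# Minimal usage sketch (not part of the original module): insert a few scalar
# items with priorities, then draw a prioritized sample of two of them.
def _example_prioritized_replay():
    replay = PrioritizedReplay(
        size=8,
        specs=tf.TensorSpec([], tf.float32, 'value'),
        importance_sampling_exponent=0.6)
    replay.insert(tf.constant([1.0, 2.0, 3.0]), tf.constant([0.1, 0.5, 1.0]))
    indices, weights, values = replay.sample(2, priority_exp=0.9)
    return indices, weights, values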
class HindsightExperienceReplay(PrioritizedReplay):
"""Replay Buffer with Hindsight Experience Replay.
Hindsight goals are sampled uniformly from subsequent steps in the
same window (`future` strategy from https://arxiv.org/pdf/1707.01495).
They are not guaranteed to come from the same episode.
This buffer is not threadsafe. Make sure you call insert() and sample() from a
single thread.
"""
def __init__(self, size, specs, importance_sampling_exponent,
compute_reward_fn,
unroll_length,
substitution_probability,
name='HindsightExperienceReplay'):
super(HindsightExperienceReplay, self).__init__(
size, specs, importance_sampling_exponent, name)
self._compute_reward_fn = compute_reward_fn
self._unroll_length = unroll_length
self._substitution_probability = substitution_probability
@tf.Module.with_name_scope
def sample(self, num_samples, priority_exp):
indices, weights, sampled_values = super(
HindsightExperienceReplay, self).sample(num_samples, priority_exp)
observation = sampled_values.env_outputs.observation
batch_size, time_horizon = observation['achieved_goal'].shape[:2]
def compute_goal_reward():
# reward[batch][time] is the reward on transition from timestep time-1
# to time. This function outputs incorrect rewards for the last transition
# in each episode but we filter such cases later.
goal_reward = self._compute_reward_fn(
achieved_goal=observation['achieved_goal'][:, 1:],
desired_goal=observation['desired_goal'][:, :-1])
return tf.concat(values=[goal_reward[:, :1] * np.nan, goal_reward],
axis=1)
# Substitute goals.
old_goal_reward = compute_goal_reward()
assert old_goal_reward.shape == observation['achieved_goal'].shape[:-1]
goal_ind = tf.concat(
values=[tf.random.uniform((batch_size, 1), min(t + 1, time_horizon - 1),
time_horizon, dtype=tf.int32)
for t in range(time_horizon)], axis=1)
substituted_goal = tf.gather(observation['achieved_goal'],
goal_ind, axis=1, batch_dims=1)
mask = tf.cast(tfp.distributions.Bernoulli(
probs=self._substitution_probability *
tf.ones(goal_ind.shape)).sample(), observation['desired_goal'].dtype)
# We don't substitute goals for the last state in each episode because we
# don't store the next states for them.
mask *= tf.cast(~sampled_values.env_outputs.done,
observation['desired_goal'].dtype)
mask = mask[..., tf.newaxis]
observation['desired_goal'] = (
mask * substituted_goal + (1 - mask) * observation['desired_goal'])
# Substitute rewards
new_goal_reward = compute_goal_reward()
assert new_goal_reward.shape == observation['achieved_goal'].shape[:-1]
sampled_values = sampled_values._replace(
env_outputs=sampled_values.env_outputs._replace(
reward=sampled_values.env_outputs.reward +
(new_goal_reward - old_goal_reward) * tf.cast(
~sampled_values.env_outputs.done, tf.float32)
))
# Subsample unrolls of length unroll_length + 1.
assert time_horizon >= self._unroll_length + 1
unroll_begin_ind = tf.random.uniform(
(batch_size,), 0, time_horizon - self._unroll_length, dtype=tf.int32)
unroll_inds = unroll_begin_ind[:, tf.newaxis] + tf.math.cumsum(
tf.ones((batch_size, self._unroll_length + 1), tf.int32),
axis=1, exclusive=True)
subsampled_values = tf.nest.map_structure(
lambda t: tf.gather(t, unroll_inds, axis=1, batch_dims=1),
sampled_values)
if hasattr(sampled_values, 'agent_state'): # do not subsample the state
subsampled_values = subsampled_values._replace(
agent_state=sampled_values.agent_state)
return indices, weights, subsampled_values
class Aggregator(tf.Module):
"""Utility module for keeping state for individual environments."""
def __init__(self, num_envs, specs, name='Aggregator'):
"""Inits an Aggregator.
Args:
num_envs: int, number of environments.
specs: Structure (as defined by tf.nest) of tf.TensorSpecs that will be
stored for each environment.
name: Name of the scope for the operations.
"""
super(Aggregator, self).__init__(name=name)
def create_variable(spec):
z = tf.zeros([num_envs] + spec.shape.dims, dtype=spec.dtype)
return tf.Variable(z, trainable=False, name=spec.name)
self._state = tf.nest.map_structure(create_variable, specs)
@tf.Module.with_name_scope
def reset(self, env_ids):
"""Fills the tensors for the given environments with zeros."""
with tf.name_scope('Aggregator_reset'):
for s in tf.nest.flatten(self._state):
s.scatter_update(tf.IndexedSlices(0, env_ids))
@tf.Module.with_name_scope
def add(self, env_ids, values):
"""In-place adds values to the state associated to the given environments.
Args:
env_ids: 1D tensor with the environment IDs we want to add values to.
values: A structure of tensors following the input spec, with an added
first dimension that must either have the same size as 'env_ids' or
not exist (in which case the value is broadcast to all environment ids).
"""
tf.nest.assert_same_structure(values, self._state)
for s, v in zip(tf.nest.flatten(self._state), tf.nest.flatten(values)):
s.scatter_add(tf.IndexedSlices(v, env_ids))
@tf.Module.with_name_scope
def read(self, env_ids):
"""Reads the values corresponding to a list of environments.
Args:
env_ids: 1D tensor with the list of environment IDs we want to read.
Returns:
A structure of tensors with the same shapes as the input specs. A
dimension is added in front of each tensor, with size equal to the number
of env_ids provided.
"""
return tf.nest.map_structure(lambda s: s.sparse_read(env_ids),
self._state)
@tf.Module.with_name_scope
def replace(self, env_ids, values, debug_op_name='', debug_tensors=None):
"""Replaces the state associated to the given environments.
Args:
env_ids: 1D tensor with the list of environment IDs.
values: A structure of tensors following the input spec, with an added
first dimension that must either have the same size as 'env_ids' or
not exist (in which case the value is broadcast to all environment ids).
debug_op_name: Debug name for the operation.
debug_tensors: List of tensors to print when the assert fails.
"""
tf.debugging.assert_rank(
env_ids, 1,
message=f'Invalid rank for aggregator {self.name}')
tf.debugging.Assert(
tf.reduce_all(tf.equal(
tf.shape(env_ids), tf.shape(tf.unique(env_ids)[0]))),
data=[env_ids,
(f'Duplicate environment ids in Aggregator: {self.name} with '
f'op name "{debug_op_name}"')] + (debug_tensors or []),
summarize=4096,
name=f'assert_no_dups_{self.name}')
tf.nest.assert_same_structure(values, self._state)
for s, v in zip(tf.nest.flatten(self._state), tf.nest.flatten(values)):
s.scatter_update(tf.IndexedSlices(v, env_ids))
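# Minimal usage sketch (not part of the original module): keep a per-environment
# step counter for four environments.
def _example_aggregator():
    counters = Aggregator(num_envs=4, specs=tf.TensorSpec([], tf.int32, 'steps'))
    counters.add(tf.constant([0, 2]), tf.constant([1, 1], tf.int32))
    counters.reset(tf.constant([2]))
    return counters.read(tf.constant([0, 1, 2, 3]))  # -> [1, 0, 0, 0]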
class ProgressLogger(object):
"""Helper class for performing periodic logging of the training progress."""
def __init__(self,
summary_writer=None,
initial_period=0.1,
period_factor=1.01,
max_period=10.0,
starting_step=0):
"""Constructs ProgressLogger.
Args:
summary_writer: Tensorflow summary writer to use.
initial_period: Initial logging period in seconds
(how often logging happens).
period_factor: Factor by which logging period is
multiplied after each iteration (exponential back-off).
max_period: Maximal logging period in seconds
(the end of exponential back-off).
starting_step: Step from which to start the summary writer.
"""
# summary_writer, last_log_{time, step} are set in reset() function.
self.summary_writer = None
self.last_log_time = None
self.last_log_step = 0
self.period = initial_period
self.period_factor = period_factor
self.max_period = max_period
# Array of strings with names of values to be logged.
self.log_keys = []
self.log_keys_set = set()
self.step_cnt = tf.Variable(-1, dtype=tf.int64)
self.ready_values = tf.Variable([-1.0],
dtype=tf.float32,
shape=tf.TensorShape(None))
self.logger_thread = None
self.logging_callback = None
self.terminator = None
self.reset(summary_writer, starting_step)
def reset(self, summary_writer=None, starting_step=0):
"""Resets the progress logger.
Args:
summary_writer: Tensorflow summary writer to use.
starting_step: Step from which to start the summary writer.
"""
self.summary_writer = summary_writer
self.step_cnt.assign(starting_step)
self.ready_values.assign([-1.0])
self.last_log_time = timeit.default_timer()
self.last_log_step = starting_step
def start(self, logging_callback=None):
assert self.logger_thread is None
self.logging_callback = logging_callback
self.terminator = threading.Event()
self.logger_thread = threading.Thread(target=self._logging_loop)
self.logger_thread.start()
def shutdown(self):
assert self.logger_thread
self.terminator.set()
self.logger_thread.join()
self.logger_thread = None
def log_session(self):
return []
def log(self, session, name, value):
# this is a python op so it happens only when this tf.function is compiled
if name not in self.log_keys_set:
self.log_keys.append(name)
self.log_keys_set.add(name)
# this is a TF op.
session.append(value)
def log_session_from_dict(self, dic):
session = self.log_session()
for key in dic:
self.log(session, key, dic[key])
return session
def step_end(self, session, strategy=None, step_increment=1):
logs = []
for value in session:
if strategy:
value = tf.reduce_mean(tf.cast(
strategy.experimental_local_results(value)[0], tf.float32))
logs.append(value)
self.ready_values.assign(logs)
self.step_cnt.assign_add(step_increment)
def _log(self):
"""Perform single round of logging."""
logging_time = timeit.default_timer()
step_cnt = self.step_cnt.read_value()
if step_cnt == self.last_log_step:
return
values = self.ready_values.read_value().numpy()
if values[0] == -1:
return
assert len(values) == len(
self.log_keys
), 'Mismatch between number of keys and values to log: %r vs %r' % (
values, self.log_keys)
if self.summary_writer:
self.summary_writer.set_as_default()
tf.summary.experimental.set_step(step_cnt.numpy())
if self.logging_callback:
self.logging_callback()
for key, value in zip(self.log_keys, values):
tf.summary.scalar(key, value)
dt = logging_time - self.last_log_time
df = tf.cast(step_cnt - self.last_log_step, tf.float32)
tf.summary.scalar('speed/steps_per_sec', df / dt)
self.last_log_time, self.last_log_step = logging_time, step_cnt
def _logging_loop(self):
"""Loop being run in a separate thread."""
last_log_try = timeit.default_timer()
while not self.terminator.is_set():
try:
self._log()
except Exception:
logging.fatal('Logging failed.', exc_info=True)
now = timeit.default_timer()
elapsed = now - last_log_try
last_log_try = now
self.period = min(self.period_factor * self.period,
self.max_period)
self.terminator.wait(timeout=max(0, self.period - elapsed))
class StructuredFIFOQueue(tf.queue.FIFOQueue):
"""A tf.queue.FIFOQueue that supports nests and tf.TensorSpec."""
def __init__(self,
capacity,
specs,
shared_name=None,
name='structured_fifo_queue'):
self._specs = specs
self._flattened_specs = tf.nest.flatten(specs)
dtypes = [ts.dtype for ts in self._flattened_specs]
shapes = [ts.shape for ts in self._flattened_specs]
super(StructuredFIFOQueue, self).__init__(capacity, dtypes, shapes)
def dequeue(self, name=None):
result = super(StructuredFIFOQueue, self).dequeue(name=name)
return tf.nest.pack_sequence_as(self._specs, result)
def dequeue_many(self, batch_size, name=None):
result = super(StructuredFIFOQueue, self).dequeue_many(
batch_size, name=name)
return tf.nest.pack_sequence_as(self._specs, result)
def enqueue(self, vals, name=None):
tf.nest.assert_same_structure(vals, self._specs)
return super(StructuredFIFOQueue, self).enqueue(
tf.nest.flatten(vals), name=name)
def enqueue_many(self, vals, name=None):
tf.nest.assert_same_structure(vals, self._specs)
return super(StructuredFIFOQueue, self).enqueue_many(
tf.nest.flatten(vals), name=name)
def batch_apply(fn, inputs):
"""Folds time into the batch dimension, runs fn() and unfolds the result.
Args:
fn: Function that takes as input the n tensors of the tf.nest structure,
with shape [time*batch, <remaining shape>], and returns a tf.nest
structure of batched tensors.
inputs: tf.nest structure of n [time, batch, <remaining shape>] tensors.
Returns:
tf.nest structure of [time, batch, <fn output shape>]. Structure is
determined by the output of fn.
"""
time_to_batch_fn = lambda t: tf.reshape(t, [-1] + t.shape[2:].as_list())
batched = tf.nest.map_structure(time_to_batch_fn, inputs)
output = fn(*batched)
prefix = [int(tf.nest.flatten(inputs)[0].shape[0]), -1]
batch_to_time_fn = lambda t: tf.reshape(t, prefix + t.shape[1:].as_list())
return tf.nest.map_structure(batch_to_time_fn, output)
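# Illustrative sketch (not part of the original module): apply a Dense layer
# over a [time, batch, features] tensor by folding time into the batch
# dimension and unfolding the result afterwards.
def _example_batch_apply():
    layer = tf.keras.layers.Dense(4)
    x = tf.zeros([5, 3, 8])        # [time=5, batch=3, features=8]
    y = batch_apply(layer, (x,))   # layer sees [15, 8] and returns [15, 4]
    return y                       # unfolded back to [5, 3, 4]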
def make_time_major(x):
"""Transposes the batch and time dimensions of a nest of Tensors.
If an input tensor has rank < 2 it returns the original tensor. Retains as
much of the static shape information as possible.
Args:
x: A nest of Tensors.
Returns:
x transposed along the first two dimensions.
"""
def transpose(t):
t_static_shape = t.shape
if t_static_shape.rank is not None and t_static_shape.rank < 2:
return t
t_rank = tf.rank(t)
t_t = tf.transpose(t, tf.concat(([1, 0], tf.range(2, t_rank)), axis=0))
t_t.set_shape(
tf.TensorShape([t_static_shape[1],
t_static_shape[0]]).concatenate(t_static_shape[2:]))
return t_t
return tf.nest.map_structure(
lambda t: tf.xla.experimental.compile(transpose, [t])[0], x)
class TPUEncodedUInt8Spec(tf.TypeSpec):
"""Type specification for composite tensor TPUEncodedUInt8."""
def __init__(self, encoded_shape, original_shape):
self._value_specs = (tf.TensorSpec(encoded_shape, tf.uint32),)
self.original_shape = original_shape
@property
def _component_specs(self):
return self._value_specs
def _to_components(self, value):
return (value.encoded,)
def _from_components(self, components):
return TPUEncodedUInt8(components[0], self.original_shape)
def _serialize(self):
return self._value_specs[0].shape, self.original_shape
def _to_legacy_output_types(self):
return self._value_specs[0].dtype
def _to_legacy_output_shapes(self):
return self._value_specs[0].shape
@property
def value_type(self):
return TPUEncodedUInt8
class TPUEncodedUInt8(composite_tensor.CompositeTensor):
def __init__(self, encoded, shape):
self.encoded = encoded
self.original_shape = shape
self._spec = TPUEncodedUInt8Spec(encoded.shape, tf.TensorShape(shape))
@property
def _type_spec(self):
return self._spec
tensor_conversion_registry.register_tensor_conversion_function(
TPUEncodedUInt8, lambda value, *unused_args, **unused_kwargs: value.encoded)
class TPUEncodedF32Spec(tf.TypeSpec):
"""Type specification for composite tensor TPUEncodedF32Spec."""
def __init__(self, encoded_shape, original_shape):
self._value_specs = (tf.TensorSpec(encoded_shape, tf.float32),)
self.original_shape = original_shape
@property
def _component_specs(self):
return self._value_specs
def _to_components(self, value):
return (value.encoded,)
def _from_components(self, components):
return TPUEncodedF32(components[0], self.original_shape)
def _serialize(self):
return self._value_specs[0].shape, self.original_shape
def _to_legacy_output_types(self):
return self._value_specs[0].dtype
def _to_legacy_output_shapes(self):
return self._value_specs[0].shape
@property
def value_type(self):
return TPUEncodedF32
class TPUEncodedF32(composite_tensor.CompositeTensor):
def __init__(self, encoded, shape):
self.encoded = encoded
self.original_shape = shape
self._spec = TPUEncodedF32Spec(encoded.shape, tf.TensorShape(shape))
@property
def _type_spec(self):
return self._spec
tensor_conversion_registry.register_tensor_conversion_function(
TPUEncodedF32, lambda value, *unused_args, **unused_kwargs: value.encoded)
def num_divisible(v, m):
return sum([1 for x in v if x % m == 0])
def tpu_encode(ts):
"""Encodes a nest of Tensors in a suitable way for TPUs.
TPUs do not support tf.uint8, tf.uint16 and other data types. Furthermore,
the speed of transfer and device reshapes depend on the shape of the data.
This function tries to optimize the data encoding for a number of use cases.
Should be used on CPU before sending data to TPU and in conjunction with
`tpu_decode` after the data is transferred.
Args:
ts: A tf.nest of Tensors.
Returns:
A tf.nest of encoded Tensors.
"""
def visit(t):
num_elements = t.shape.num_elements()
# We need a multiple of 128 elements: encoding reduces the number of
# elements by a factor of 4 (packing uint8s into uint32s), and the first
# thing decode does is reshape with a 32 minor-most dimension.
if (t.dtype == tf.uint8 and num_elements is not None and
num_elements % 128 == 0):
# For details of these transformations, see b/137182262.
x = tf.xla.experimental.compile(
lambda x: tf.transpose(x, list(range(1, t.shape.rank)) + [0]), [t])[0]
x = tf.reshape(x, [-1, 4])
x = tf.bitcast(x, tf.uint32)
x = tf.reshape(x, [-1])
return TPUEncodedUInt8(x, t.shape)
elif t.dtype == tf.uint8:
logging.warning('Inefficient uint8 transfer with shape: %s', t.shape)
return tf.cast(t, tf.bfloat16)
elif t.dtype == tf.uint16:
return tf.cast(t, tf.int32)
elif (t.dtype == tf.float32 and t.shape.rank > 1 and not
(num_divisible(t.shape.dims, 128) >= 1 and
num_divisible(t.shape.dims, 8) >= 2)):
x = tf.reshape(t, [-1])
return TPUEncodedF32(x, t.shape)
else:
return t
return tf.nest.map_structure(visit, ts)
def tpu_decode(ts, structure=None):
"""Decodes a nest of Tensors encoded with tpu_encode.
Args:
ts: A nest of Tensors or TPUEncodedUInt8 composite tensors.
structure: If not None, a nest of Tensors or TPUEncodedUInt8 composite
tensors (possibly within PerReplica's) that are only used to recreate the
structure of `ts` which then should be a list without composite tensors.
Returns:
A nest of decoded tensors packed as `structure` if available, otherwise
packed as `ts`.
"""
def visit(t, s):
s = s.values[0] if isinstance(s, values_lib.PerReplica) else s
if isinstance(s, TPUEncodedUInt8):
x = t.encoded if isinstance(t, TPUEncodedUInt8) else t
x = tf.reshape(x, [-1, 32, 1])
x = tf.broadcast_to(x, x.shape[:-1] + [4])
x = tf.reshape(x, [-1, 128])
x = tf.bitwise.bitwise_and(x, [0xFF, 0xFF00, 0xFF0000, 0xFF000000] * 32)
x = tf.bitwise.right_shift(x, [0, 8, 16, 24] * 32)
rank = s.original_shape.rank
perm = [rank - 1] + list(range(rank - 1))
inverted_shape = np.array(s.original_shape)[np.argsort(perm)]
x = tf.reshape(x, inverted_shape)
x = tf.transpose(x, perm)
return x
elif isinstance(s, TPUEncodedF32):
x = t.encoded if isinstance(t, TPUEncodedF32) else t
x = tf.reshape(x, s.original_shape)
return x
else:
return t
return tf.nest.map_structure(visit, ts, structure or ts)
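# Illustrative usage sketch (not part of the original module): exercises the
# float32 path of tpu_encode/tpu_decode. The [3, 5] shape is an arbitrary
# example that fails the divisible-by-128/divisible-by-8 layout check, so the
# tensor gets flattened into a TPUEncodedF32 on the host and is restored to
# its original shape by tpu_decode.
def _example_f32_roundtrip():
  t = tf.reshape(tf.range(15, dtype=tf.float32), [3, 5])
  encoded = tpu_encode(t)        # TPUEncodedF32 wrapping a flat [15] payload
  decoded = tpu_decode(encoded)  # tf.Tensor with the original [3, 5] shape
  tf.debugging.assert_equal(t, decoded)
  return decoded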
def split_structure(structure, prefix_length, axis=0):
"""Splits in two a tf.nest structure of tensors along the first axis."""
flattened = tf.nest.flatten(structure)
split = [tf.split(x, [prefix_length, tf.shape(x)[axis] - prefix_length],
axis=axis)
for x in flattened]
flattened_prefix = [pair[0] for pair in split]
flattened_suffix = [pair[1] for pair in split]
return (tf.nest.pack_sequence_as(structure, flattened_prefix),
tf.nest.pack_sequence_as(structure, flattened_suffix))
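# Minimal sketch (illustrative, not from the original source): split a dict of
# tensors into its first two rows and the remainder while preserving the nest
# structure. The field names below are hypothetical.
def _example_split_structure():
  batch = {'observation': tf.zeros([6, 84]), 'reward': tf.zeros([6])}
  prefix, suffix = split_structure(batch, prefix_length=2)
  # prefix['observation'] has shape [2, 84]; suffix['observation'] has shape [4, 84].
  return prefix, suffix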
class nullcontext(object):
def __init__(self, *args, **kwds):
del args # unused
del kwds # unused
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def tensor_spec_from_gym_space(space, name):
"""Get a TensorSpec from a gym spec."""
if space.shape is not None:
return tf.TensorSpec(space.shape, space.dtype, name)
if not isinstance(space, gym.spaces.Tuple):
raise ValueError(
'Space \'{}\' is not a tuple: unknown shape.'.format(space))
num_elements = 0
for s in space:
if len(s.shape) != 1:
raise ValueError(
'Only 1 dimension subspaces are handled for tuple spaces: {}'.format(
space))
num_elements += s.shape[0]
return tf.TensorSpec((num_elements,), tf.float32, name)
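# Minimal sketch (illustrative): a Box observation space maps directly onto a
# TensorSpec with the same shape and dtype; only tuple spaces of 1-D subspaces
# get flattened into a single float32 vector spec.
def _example_tensor_spec_from_gym_space():
  space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 3), dtype=np.uint8)
  return tensor_spec_from_gym_space(space, 'observation')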
def validate_learner_config(config, num_hosts=1):
"""Shared part of learner config validation."""
assert config.num_envs > 0
assert config.env_batch_size > 0
if config.inference_batch_size == -1:
config.inference_batch_size = max(config.env_batch_size,
config.num_envs // (2 * num_hosts))
assert config.inference_batch_size > 0
assert config.inference_batch_size % config.env_batch_size == 0, (
'Learner-side batch size (=%d) must be exact multiple of the '
'actor-side batch size (=%d).' %
(config.inference_batch_size, config.env_batch_size))
assert config.num_envs >= config.inference_batch_size * num_hosts, (
'Inference batch size is bigger than the number of environments.')
def get_non_dying_envs(envs_needing_reset, reset_mask, env_ids):
"""Returns which transitions are valid or generated before an env. restarted.
Args:
envs_needing_reset: <int32>[num_envs_needing_reset] tensor with the IDs
of the environments that need a reset.
reset_mask: <bool>[inference_batch_size] tensor that contains True for
transitions that triggered an environment reset (i.e. transition whose
      run_id does not match the previously stored run_id for the corresponding
environment).
env_ids: <int32>[inference_batch_size] tensor of environment ID for each
transition in the inference batch.
Returns:
A pair:
- <bool>[inference_batch_size] tensor, True when the transition comes from
a non-dying actor. False for the transitions generated by an environment
before the transition that triggered a reset. This will typically be the
last generated transitions before an environment restarts.
- <int32>[num_nondying_envs] tensor, IDs of the envs that are not dying.
"""
# <bool>[inference_batch_size] with True for all transitions coming from
# environments that need a reset. Contrary to 'reset_mask' this covers *all*
# transitions from the environments that have one transition that triggered
# a reset, while 'reset_mask' only contains True for the transitions that
# triggered a reset.
envs_needing_reset_mask = tf.reduce_any(
tf.equal(env_ids, tf.expand_dims(envs_needing_reset, -1)),
axis=0)
dying_envs_mask = tf.logical_and(
envs_needing_reset_mask,
tf.logical_not(reset_mask))
num_dying_envs = tf.reduce_sum(tf.cast(dying_envs_mask, tf.int32))
if tf.not_equal(num_dying_envs, 0):
tf.print('Found', num_dying_envs, 'transitions from dying environments. '
'Dying environment IDs:',
tf.boolean_mask(env_ids, dying_envs_mask),
'Dying environments mask:', dying_envs_mask)
nondying_envs_mask = tf.logical_not(dying_envs_mask)
nondying_env_ids = tf.boolean_mask(env_ids, nondying_envs_mask)
unique_nondying_env_ids, _, unique_nondying_env_ids_count = (
tf.unique_with_counts(nondying_env_ids))
# If this fires, a single inference batch contains at least two transitions
  # with the same env_id, even after filtering transitions from dying actors.
# This can mean that an actor restarted twice while the same inference batch
# was being filled.
tf.debugging.Assert(
tf.equal(tf.shape(nondying_env_ids)[0],
tf.shape(unique_nondying_env_ids)[0]),
data=[
tf.gather(unique_nondying_env_ids,
                    tf.where(unique_nondying_env_ids_count >= 2)[:, 0]),
nondying_env_ids],
summarize=4096)
return nondying_envs_mask, nondying_env_ids
def config_from_flags():
"""Generates training config from flags.
Returns:
Generated training config.
"""
config = {}
for key in FLAGS.__flags.keys():
config[key] = FLAGS[key].value
return config
def serialize_config(config):
"""Serializes training config, so that it can be send over SEED's GRPC.
Args:
config: config to serialize.
Returns:
Tensor representing serialized training config.
"""
if isinstance(config, flags._flagvalues.FlagValues):
skip_keys = {'run_mode'}
output = {}
for key in FLAGS.__flags.keys():
if FLAGS[key].value != FLAGS[key].default and key not in skip_keys:
output[key] = FLAGS[key].value
return tf.constant(pickle.dumps(output))
return tf.constant(pickle.dumps(config))
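# Illustrative round trip (not from the original source): a plain dict config
# is pickled into a scalar string tensor and can be recovered on the receiving
# side with pickle.loads. The keys below are hypothetical.
def _example_serialize_config_roundtrip():
  config = {'batch_size': 32, 'unroll_length': 20}
  serialized = serialize_config(config)
  assert pickle.loads(serialized.numpy()) == config
  return serialized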
def update_config(current_config, client):
"""Updates current config with information from the Learner.
Args:
current_config: config to update.
client: Learner's client object used to retrieve updated config.
"""
try:
update = client.get_config()
except AttributeError:
# Getting configuration is not supported by the Learner.
return
update = pickle.loads(update.numpy())
if isinstance(update, dict):
    for key, value in update.items():
current_config[key] = value
else:
current_config = update
|
mock_server.py
|
# -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""A mock server for testing purposes."""
from threading import Thread
import socket
try:
from urllib import parse
except ImportError:
import urlparse as parse
from future.backports.http.server import (BaseHTTPRequestHandler, HTTPServer)
import requests
class MockServer(BaseHTTPRequestHandler):
""" Overrides the following methods in BaseHTTPRequestHandler """
# pylint: disable=no-member
def do_GET(self): # pylint: disable=C0103,missing-docstring
self.send_response(requests.codes.ok)
self.end_headers()
def do_PUSH(self): # pylint: disable=C0103,missing-docstring
self.send_response(requests.codes.ok)
self.end_headers()
def do_POST(self): # pylint: disable=C0103,missing-docstring
# Certain requests expect a very specific response.
# For those, return other status codes
if (self.path.startswith('/$/RollbackUpgrade?') or
self.path.startswith('/ComposeDeployments/deploymentName/$/Delete') or
self.path.startswith('/$/StartClusterConfigurationUpgrade')):
self.send_response(requests.codes.accepted)
self.end_headers()
return
if self.path.startswith('/Applications/$/Create'):
self.send_response(requests.codes.created)
self.end_headers()
return
# Return ok as the default response
self.send_response(requests.codes.ok)
self.end_headers()
def do_PUT(self): # pylint: disable=C0103,missing-docstring
# Return 200 as the default response
if self.path.startswith('/ImageStore/') and self.path.find('sample_nested_folders') != -1:
parsed_url = parse.urlparse(self.path)
query = parse.parse_qs(parsed_url.query) # This is a dict of lists
counter = 0
import time
while int(query['timeout'][0]) > 0 and counter < 3:
time.sleep(1)
counter += 1
self.send_response(requests.codes.ok)
self.end_headers()
def do_DELETE(self): # pylint: disable=C0103,missing-docstring
# Return 200 as the default response
self.send_response(requests.codes.ok)
self.end_headers()
def find_localhost_free_port():
""" Return a free port. """
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# binds to 0, which auto reassigns to a free port
sock.bind(('localhost', 0))
# using [1] to access returned pair of address, port
return sock.getsockname()[1]
def start_mock_server(port):
""" Start a new mock server at localhost:port. """
mock_server = HTTPServer(('localhost', port), MockServer)
mock_server_thread = Thread(target=mock_server.serve_forever)
    mock_server_thread.daemon = True  # Set automatic cleanup of this thread
mock_server_thread.start()
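# Illustrative usage sketch (an assumption about how these helpers are meant to
# be combined, not part of the original module): pick a free port, start the
# mock server on it, and issue a request against it.
def _example_start_and_query():
    port = find_localhost_free_port()
    start_mock_server(port)
    response = requests.get('http://localhost:{}/some/path'.format(port))
    assert response.status_code == requests.codes.ok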
|
broadcast.py
|
#!/usr/bin/env python
#
# Peter F. Klemperer
#
# No-Frills Basic Broadcast Mechanism
#
# If re-writing this code, consider
# separating the networking functions
# into a subclass apart from the
# broadcast specific functions
#
# TODO
# * Change the node_set into a Set()
#
#
from __future__ import print_function
import socket
import pickle
import threading
class BroadcastNode():
def __init__(self):
self.isActive = False
self.node_set = set()
self.port = 49152
self.server_address = (socket.gethostname(), self.port)
self.callback = self.test_callback
return
def test_callback(self, message):
print("broadcast_test message: " + str(message))
def set_callback(self, cb):
self.callback = cb
def send(self, address, pickled_message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
try:
sock.sendall(pickled_message)
finally:
sock.close()
def send_with_return(self, address, pickled_message):
""" send to just one node """
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1.0)
sock.connect((address,self.port))
try:
sock.sendall(pickled_message)
ret_msg = sock.recv(4096)
ret_msg = pickle.loads(ret_msg)
finally:
sock.close()
return ret_msg
    def broadcast(self, message):
        """ broadcast message to channel """
        broadcast_message = ("BROADCAST", message)
        msg = pickle.dumps(broadcast_message)
        for node in self.node_set.copy():
            self.send(node, msg)
def start_server(self):
""" start the receiving server with handle """
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.bind(("0.0.0.0", self.port))
self.server_socket.listen(5)
self.server_socket.settimeout(0.1)
# spawn thread for server
self.server_thread = threading.Thread(target=self._server)
self.server_thread.start()
# add server to list of nodes
self.node_set.add(self.server_address)
return
def _server(self):
self.isActive = True
while self.isActive:
try:
conn, addr = self.server_socket.accept()
mesg = conn.recv(4096)
self._process_message(conn, addr, mesg)
conn.close()
except socket.timeout:
pass
def stop_server(self):
""" stop the receiver server """
# TODO broadcast shutdown message
# shutdown_message = "SHUTDOWN"
# self.broadcast( shutdown_message )
self.isActive = False
self.server_thread.join()
self.server_socket.close()
def join(self, address):
""" register node with the broadcast channel
* call any node in channel for their subscriber list
* update my node_set
* broadcast "JOIN"
"""
# ASK FOR A NODE_SET
viewers_mesg = ("SUBSCRIBERS","" )
pickled_message = pickle.dumps(viewers_mesg)
subscribers_reply = self.send_with_return(address, pickled_message)
# UPDATE MY NODE_SET
self.node_set |= subscribers_reply
# BROADCAST JOIN
broadcast_message = ("JOIN", self.node_set)
        broadcast_message_pickled = pickle.dumps(broadcast_message)
        for node in self.node_set.copy():
            self.send(node, broadcast_message_pickled)
return
def register_handler(self, handler=None):
""" how to check if cb is valid type """
if handler is not None and self.isActive:
self.callback = handler
def leave(self):
""" unregister node with the broadcast channel """
leave_message = ("LEAVE", self.server_address)
        leave_message_pickled = pickle.dumps(leave_message)
        for node in self.node_set.copy():
            self.send(node, leave_message_pickled)
return
def _process_message(self, conn, addr, mesg):
# use conn and addr, but caller will close conn
# unpickle the message
msg_type, msg_data = pickle.loads(mesg)
# cases to handle
# join
if msg_type == "TEST":
print("TEST " + str(msg_data))
elif msg_type == "SUBSCRIBERS":
#print("SUBSCRIBERS" + str(msg_data))
# send list back to caller
conn.sendall(pickle.dumps(self.node_set))
elif msg_type == "JOIN":
peer_node_set = msg_data
#print("JOIN" + str(peer_node_set))
self.node_set |= peer_node_set
# leave
elif msg_type == "LEAVE":
peer_name = msg_data
#print("LEAVE" + str(peer_name))
self.node_set.discard(peer_name)
# pass message to callback
elif msg_type == "BROADCAST":
#print("BROADCAST" + str(msg_data))
self.callback(msg_data)
return
def __str__(self):
resp = "BroadcastNode: "
if not self.isActive:
resp += "inactive"
return resp
resp += "active, node_set: "
resp += str(self.node_set)
return resp
def close(self):
self.leave()
self.stop_server()
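# Illustrative single-node sketch (not part of the original module): the node
# is a member of its own node_set, so a broadcast loops back to its callback.
# Assumes port 49152 is free and that the local hostname resolves.
def _example_single_node():
    node = BroadcastNode()
    node.set_callback(lambda message: print("received:", message))
    node.start_server()
    node.broadcast("hello")   # delivered back to this node's own callback
    node.close()              # leave the channel and stop the server thread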
def test_client(ip, port, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
try:
sock.sendall(message)
#response = sock.recv(1024)
#print("Received: {}".format(response))
finally:
sock.close()
def test_node():
node = BroadcastNode()
try:
my_name = socket.gethostname()
print("gethostname()" + " = " + str(my_name))
node.start_server()
node.join("jiffy.local")
print(node.node_set)
node.broadcast("hello from " + my_name)
while True:
pass
except KeyboardInterrupt:
print("KeyboardInterrupt has been caught.")
node.leave()
node.stop_server()
if __name__ == '__main__':
test_node()
|
dining-philosophers.py
|
import time
from multiprocessing import Process, Semaphore
n = 5 # number of philosophers
m = 10 # number of times each philosopher eats
# make semaphores
sticks = [Semaphore(1) for _ in range(n)]
procs = []
# making each philosopher hungry m times
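# Deadlock avoidance: every philosopher acquires its higher-numbered stick
# first and the lower-numbered one second, which imposes a global ordering on
# the semaphores so a circular wait can never form.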
for i in range(n):
def f(i):
stick1, stick2 = (sticks[i],sticks[0]) if i == n-1 else (sticks[i+1], sticks[i])
for _ in range(m):
stick1.acquire()
print("pickup stick1 | done by:", i)
stick2.acquire()
print("pickup stick2 | done by:", i)
print("Philosopher", i, "is eating")
stick1.release()
time.sleep(0.005)
stick2.release()
    # create and start a new process for this philosopher
procs.append(Process(target = f, args = [i], daemon=True))
procs[-1].start()
for proc in procs: proc.join()
|
evaluator.py
|
# -*- coding: utf-8 -*-
# Copyright 2013-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2021
# - Mario Lassnig <mario.lassnig@cern.ch>, 2013
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013
# - Vincent Garonne <vincent.garonne@cern.ch>, 2016-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020-2021
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
"""
Judge-Evaluator is a daemon to re-evaluate and execute replication rules.
"""
import logging
import os
import socket
import threading
import time
import traceback
from datetime import datetime, timedelta
from random import randint
from re import match
from six import iteritems
from sqlalchemy.exc import DatabaseError
from sqlalchemy.orm.exc import FlushError
import rucio.db.sqla.util
from rucio.common.exception import DatabaseException, DataIdentifierNotFound, ReplicationRuleCreationTemporaryFailed
from rucio.common.logging import setup_logging
from rucio.common.types import InternalScope
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.monitor import record_counter
from rucio.core.rule import re_evaluate_did, get_updated_dids, delete_updated_did
graceful_stop = threading.Event()
def re_evaluator(once=False):
"""
Main loop to check the re-evaluation of dids.
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
paused_dids = {} # {(scope, name): datetime}
# Make an initial heartbeat so that all judge-evaluators have the correct worker number on the next try
executable = 'judge-evaluator'
live(executable=executable, hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
start = time.time() # NOQA
# Refresh paused dids
paused_dids = dict((k, v) for k, v in iteritems(paused_dids) if datetime.utcnow() < v)
            # Select a bunch of dids for re-evaluation for this worker
dids = get_updated_dids(total_workers=heartbeat['nr_threads'],
worker_number=heartbeat['assign_thread'],
limit=100,
blocked_dids=[(InternalScope(key[0], fromExternal=False), key[1]) for key in paused_dids])
logging.debug('re_evaluator[%s/%s] index query time %f fetch size is %d (%d blocked)' % (heartbeat['assign_thread'],
heartbeat['nr_threads'],
time.time() - start,
len(dids),
len([(InternalScope(key[0], fromExternal=False), key[1]) for key in paused_dids])))
            # If the list is empty, send the worker to sleep
if not dids and not once:
logging.debug('re_evaluator[%s/%s] did not get any work (paused_dids=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'], str(len(paused_dids))))
graceful_stop.wait(30)
else:
done_dids = {}
for did in dids:
if graceful_stop.is_set():
break
# Check if this did has already been operated on
did_tag = '%s:%s' % (did.scope.internal, did.name)
if did_tag in done_dids:
if did.rule_evaluation_action in done_dids[did_tag]:
logging.debug('re_evaluator[%s/%s]: evaluation of %s:%s already done' % (heartbeat['assign_thread'], heartbeat['nr_threads'], did.scope, did.name))
delete_updated_did(id_=did.id)
continue
else:
done_dids[did_tag] = []
# Jump paused dids
if (did.scope.internal, did.name) in paused_dids:
continue
try:
start_time = time.time()
re_evaluate_did(scope=did.scope, name=did.name, rule_evaluation_action=did.rule_evaluation_action)
logging.debug('re_evaluator[%s/%s]: evaluation of %s:%s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'], did.scope, did.name, time.time() - start_time))
delete_updated_did(id_=did.id)
done_dids[did_tag].append(did.rule_evaluation_action)
except DataIdentifierNotFound:
delete_updated_did(id_=did.id)
except (DatabaseException, DatabaseError) as e:
if match('.*ORA-00054.*', str(e.args[0])):
paused_dids[(did.scope.internal, did.name)] = datetime.utcnow() + timedelta(seconds=randint(60, 600))
logging.warning('re_evaluator[%s/%s]: Locks detected for %s:%s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], did.scope, did.name))
record_counter('rule.judge.exceptions.LocksDetected')
elif match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.error(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except ReplicationRuleCreationTemporaryFailed as e:
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
logging.warning('re_evaluator[%s/%s]: Replica Creation temporary failed, retrying later for %s:%s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], did.scope, did.name))
except FlushError as e:
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
logging.warning('re_evaluator[%s/%s]: Flush error for %s:%s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], did.scope, did.name))
except (DatabaseException, DatabaseError) as e:
if match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except Exception as e:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
if once:
break
die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1):
"""
Starts up the Judge-Eval threads.
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise DatabaseException('Database was not updated, daemon won\'t start')
executable = 'judge-evaluator'
hostname = socket.gethostname()
sanity_check(executable=executable, hostname=hostname)
if once:
re_evaluator(once)
else:
logging.info('Evaluator starting %s threads' % str(threads))
threads = [threading.Thread(target=re_evaluator, kwargs={'once': once}) for i in range(0, threads)]
[t.start() for t in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
|
main.py
|
# Purpose: This is the main script of CB2 Bot, a work-in-progress Twitch chat
# bot that performs basic notification and chat interaction tasks
# including greeting chat users, thanking followers, cheerers, and
# subscribers, and storing and executing custom text commands.
# Author: Kyle Lander
# Date: 2021-11
# TODO: Add sound alert functionality to notify the streamer of when someone
# in the chat says hi to them.
# TODO: Add functionality for other EventSub topics (currently only follow,
# subscribe, and cheer are supported)
import json
import os
import requests
import schedule
import socket
import sqlite3
import threading
import logging
import urllib.parse as urlparse
from dotenv import load_dotenv
from helpers import (authorize, get_app_access_token, get_user_data,
nuke_eventsubs, subscribe_to_eventsub,
verify_signature)
from http.server import BaseHTTPRequestHandler, HTTPServer
from os.path import join, dirname
from time import time
from urllib.parse import parse_qs
# Load environment variables.
dotenv_path = join(dirname(__file__), 'config.env')
load_dotenv(dotenv_path)
# Get the location of the script for creating paths.
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Define some constants that are needed to connect to the servers.
BOT_USERNAME = os.environ['BOT_USERNAME']
CALLBACK = os.environ['CALLBACK']
CHANNEL = f'#{os.environ["CHANNEL"]}'
CLIENT_ID = os.environ['CLIENT_ID']
AUTH = {'Accept': 'application/vnd.twitchtv.v5+json',
'Client-ID': CLIENT_ID}
COOLDOWN = os.environ['COOLDOWN']
DB = os.path.join(__location__, os.environ['DB'])
ENDPOINT = 'https://api.twitch.tv/helix/eventsub/subscriptions'
HTTP_PORT = int(os.environ['HTTP_PORT'])
IRC_CONNECTION_DATA = ('irc.chat.twitch.tv', 6667)
OAUTH = f'oauth:{os.environ["OAUTH"]}'
SECRET = os.environ['SECRET']
APP_ACCESS_TOKEN = get_app_access_token(CLIENT_ID, SECRET)
# This list contains all users that will be able to execute certain chat
# commands that should only be performed by moderators. Names will be
# checked against this list before executing such commands.
MODS = os.environ['MODS']
# Define a list of phrases to respond to as a greeting.
GREETINGS = ['hi', 'hello', 'heyo', 'yo', 'hey', 'salut', 'suh']
# Define a list of users that have said one of the 'hello' variations already.
seen_users = []
# Define a dictionary that will store instances of the CooldownHandler class.
# Every command will get its own instance of the class.
cooldown_handlers = {}
# Create a socket object and make a connection to the chat ircserver.
ircserver = socket.socket()
ircserver.connect(IRC_CONNECTION_DATA)
# Tell the ircserver who we are.
ircserver.send(bytes('PASS {}\r\n'.format(OAUTH), 'UTF-8'))
ircserver.send(bytes('NICK {}\r\n'.format(BOT_USERNAME), 'UTF-8'))
# This list will hold all previously seen Twitch-Eventsub-Message-Id values.
seen_message_ids = []
# Define a class that will keep track of when a command was last used and
# determine if it can be used again by non-mod users.
class CooldownHandler:
'''
A class to keep track of cooldowns for IRC chat commands.
...
Attributes
----------
command : str
name of the command
cooldown : int
length of cooldown in seconds
last_used : float
time the command was last used
Methods
-------
is_useable():
Checks if more time than the cooldown length has passed since the
command was last used. Returns a boolean: True if the cooldown has
passed, False if the command is still on cooldown.
'''
def __init__(self, command: str, cooldown: int) -> None:
'''
        Constructs the attributes for the CooldownHandler object.
Parameters
----------
command : str
name of the command
cooldown : int
length of cooldown in seconds
'''
self.command = command
self.cooldown = int(cooldown)
self.last_used = time()
def is_useable(self) -> bool:
if time() > self.cooldown + self.last_used:
self.last_used = time()
return True
return False
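# Illustrative sketch (not part of the bot): a fresh handler starts its cooldown
# at construction time, so the first is_useable() check fails until the cooldown
# (2 seconds here, chosen arbitrarily) has elapsed.
def _example_cooldown():
    from time import sleep   # local import just for this sketch
    handler = CooldownHandler('hug', 2)
    print(handler.is_useable())   # False: the 2-second cooldown began at construction
    sleep(2.1)
    print(handler.is_useable())   # True, and the cooldown timer restarts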
# Set up the request handler that will listen for requests from Twitch.
# Modified from: https://gist.github.com/mdonkers/63e115cc0c79b4f6b8b3a6b797e485c7
class RequestHandler(BaseHTTPRequestHandler):
'''
A class to handle HTTP requests from Twitch, subclassed from
BaseHTTPRequestHandler.
...
Methods
----------
do_GET():
Handles all GET requests from Twitch. This is currently only used for
handling the OIDC Authorization Code Flow process of authorizing the
bot for EventSub topics like 'subscribe' that require elevated
permission from the streamer.
do_POST():
Handles all POST requests from Twitch. This is currently used for
responding to webhook verifications and receiving EventSub
notifications.
'''
def _set_response(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
parsed = urlparse.urlparse(self.path).query
print(f'PARSED: {parsed}\n')
# Handle GET requests from Twitch
try:
code = parse_qs(parsed)['code'][0]
state = parse_qs(parsed)['state'][0]
print(f'STATE: {state}\n')
print(f'LOCAL STATE: {os.environ["STATE"]}\n')
print(f'CODE: {code}\n')
if state == os.environ['STATE']:
request_dict = {
'client_id': CLIENT_ID,
'client_secret': SECRET,
'code': code,
'grant_type': 'authorization_code',
'redirect_uri': CALLBACK
}
response = requests.post('https://id.twitch.tv/oauth2/token', request_dict)
print(f'RESPONSE: {response}\n')
self._set_response()
# Return 403 if the states don't match.
else:
self.send_response(403)
self.end_headers()
except:
pass
logging.info('GET request,\nPath: %s\nHeaders:\n%s\n', str(self.path), str(self.headers))
self._set_response()
self.wfile.write('GET request for {}'.format(self.path).encode('utf-8'))
def do_POST(self):
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
logging.info('POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n',
str(self.path), str(self.headers), post_data.decode('utf-8'))
# This section will handle POST requests that come from Twitch.
if self.headers['Twitch-Eventsub-Message-Id']:
message_id = self.headers['Twitch-Eventsub-Message-Id']
eventsub_timestamp = self.headers['Twitch-Eventsub-Message-Timestamp']
eventsub_signature = self.headers['Twitch-Eventsub-Message-Signature']
# Return a 200 status if the message ID has been seen before.
if message_id in seen_message_ids:
self._set_response()
print(f'Previously seen message ID: {message_id}, returning 200.\n')
# Verify that the request came from Twitch.
elif verify_signature(SECRET, message_id, eventsub_timestamp, post_data, eventsub_signature) == True:
seen_message_ids.append(message_id)
payload = json.loads(post_data)
# If the message is a webhook verification, return the challenge.
if self.headers['Twitch-Eventsub-Message-Type'] == 'webhook_callback_verification':
eventsub_challenge = payload['challenge']
challenge_bytes = eventsub_challenge.encode()
self.send_response(200)
self.send_header('Content-Length', str(len(challenge_bytes)))
self.end_headers()
self.wfile.write(challenge_bytes)
# If the message is a notification, take the appropriate action.
elif self.headers['Twitch-Eventsub-Message-Type'] == 'notification':
subscription_type = self.headers['Twitch-Eventsub-Subscription-Type']
user_name = payload['event']['user_name']
# If someone followed, thank them in chat.
if subscription_type == 'channel.follow':
sendmsg(f'Thank you for following {user_name}!')
self._set_response()
# If someone subscribed, thank them in chat.
elif subscription_type == 'channel.subscribe':
sub_tier = int(int(payload['event']['tier']) / 1000)
sendmsg(f'{user_name} subscribed at tier {sub_tier}! Thank you for the support!')
self._set_response()
# If someone cheered, thank them in chat.
elif subscription_type == 'channel.cheer':
bits = payload['event']['bits']
if payload['event']['is_anonymous'] == False:
sendmsg(f'{user_name} cheered {bits} bits! Thank you for the support!')
else:
sendmsg(f'Anonymous cheered {bits} bits! Thank you for the support!')
self._set_response()
# More actions for other notification types could be added here
# Return 403 if the signature verification failed.
else:
self.send_response(403)
self.end_headers()
else:
self._set_response()
self.wfile.write('POST request for {}'.format(self.path).encode('utf-8'))
# This function will define and run an HTTP server with the handler above.
def run(server_class=HTTPServer, handler_class=RequestHandler, port=HTTP_PORT):
logging.basicConfig(level=logging.INFO)
server_address = ('', port)
httpd = server_class(server_address, handler_class)
logging.info('Starting httpd...\n')
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
logging.info('Stopping httpd...\n')
# Assign a thread to the run() function above, this lets the request handler
# run forever in a background thread while the rest of the program continues on.
thread = threading.Thread(target=run)
thread.daemon = True
# Define a function that adds a command to the database if it doesn't already exist.
def add_command(message: str, cursor: sqlite3.Cursor):
# Split the message into a list.
splitmsg = message.split(' ')
    # Check if this is to be a mod-only command (the 'mod' flag, if present, comes
    # right after 'addcom' at index 1, pushing the new command's name to index 2).
if splitmsg[1].lower() == 'mod':
# Set the mod boolean to True if the mod flag is present.
mod = 1
# Get the name of the new command, its index depends on whether the 'mod' flag
# is present.
command = splitmsg[2].lower()
# Assemble the command contents into a string, starting index depends on whether
# the 'mod' flag is present.
content = ' '.join(splitmsg[3:])
else:
mod = 0
command = splitmsg[1].lower()
content = ' '.join(splitmsg[2:])
# Check if the command already exists.
cursor.execute('SELECT command FROM commands WHERE command = :command',
{'command': command})
# Insert the new command if it doesn't already exist.
if cursor.fetchone() == None:
cursor.execute('INSERT INTO commands (command, content, mod) VALUES (?, ?, ?)',
(command, content, mod))
return True
return False
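# Illustrative sketch using an in-memory database (the CREATE TABLE statement is
# an assumption made for demonstration; the bot's real schema is defined elsewhere).
def _example_add_command():
    conn = sqlite3.connect(':memory:')
    cur = conn.cursor()
    cur.execute('CREATE TABLE commands (command TEXT, content TEXT, mod INTEGER)')
    added = add_command('addcom mod secret This command is mod-only', cur)
    return added   # True on first insertion, False if the command already existed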
# Schedule a job to clear out the seen_users list every day at midnight.
def clear_seen_users():
seen_users.clear()
sendmsg('/me Seen users list cleared!')
# Define a function that handles commands stored in the database.
def command_handler(command: str, user: str, cursor: sqlite3.Cursor) -> str:
    # Try/except in case of sqlite3 error on query execution.
try:
cursor.execute('SELECT command, content, mod FROM commands WHERE command = :command',
{'command': command})
row = cursor.fetchone()
# Check if nothing was returned, meaning no command was found.
if row == None:
return None
# Return any command if the user is a mod.
if user in MODS:
return row[1]
# Non-mod commands are usable by anyone, but are subject to cooldowns.
if row[2] == 0:
# Check if a handler for the command already exists, and then
# check to see if the command is on cooldown.
if command in cooldown_handlers:
cmd = cooldown_handlers[command]
if cmd.is_useable():
return row[1]
print(f'Command {command} on cooldown.\n')
return None
# Create a CooldownHander for the command,
# then return the command since this will be its first use.
cooldown_handlers[f'{command}'] = CooldownHandler(command, COOLDOWN)
return row[1]
print(f'command_handler: user {user} does not have permission to use !{command}.\n')
# Return None because the command does exist, the user just did not
# have permission to use it.
return None
except sqlite3.Error:
print(f'SQLite3 Error raised, returning None.\n')
return None
# Define a function that takes in text that is decorated with a leading "!", indicating that is
# a command, and execute the appropriate command if it exists.
def command(message: str, name: str, cursor: sqlite3.Cursor, dbconnection: sqlite3.Connection):
# Remove the leading !, save this in case it's a new command that needs added to the DB.
message = message[1:]
# Split the message on spaces and get just the first part (the command name).
cmd = message.split(' ')[0]
print(f'Command {cmd} received, issued by user {name}\n')
    # This handles execution of all commands that are stored in the database.
    # Uses the walrus operator for simplicity; command_handler() returns None when
    # the command does not exist or the user lacks permission, so the body only
    # runs when there is something to send.
    if dbcmd := command_handler(cmd, name, cursor):
        sendmsg(dbcmd)
# This block handles all the hardcoded commands.
# These commands are mod-only and are necessary for the
# core functionality of the bot. Commands have been arranged
# according to estimated frequency of use.
# Shoutout command for referring viewers to other streamers.
elif cmd == 'so' and name in MODS:
shoutout = message.split(' ')[1]
sendmsg(f'Check out {shoutout} at https://twitch.tv/{shoutout} !')
# Adds a new command to the database.
elif cmd == 'addcom' and name in MODS:
if add_command(message, cursor):
dbconnection.commit()
else:
print(f'Failed to add command {cmd}, it may already exist.\n')
# Deletes a command stored in the database.
elif cmd == 'delcom' and name in MODS:
delete_command(message, cursor)
dbconnection.commit()
# Subscribes the bot to the channel's 'follow' EventSub topic.
elif cmd == 'esfollow' and name in MODS:
print('Subscribing to EventSub Follow.\n')
# Accessing the env variable here because the CHANNEL variable has a leading '#'.
subscribe_to_eventsub(APP_ACCESS_TOKEN, CALLBACK, CLIENT_ID, SECRET,
get_user_id(os.environ["CHANNEL"]), 'follow')
# Subscribes the bot to the channel's 'subscribe' and 'cheer' EventSub topics.
elif cmd == 'essub' and name in MODS:
print('Subscribing to EventSub Subscribe.\n')
subscribe_to_eventsub(APP_ACCESS_TOKEN, CALLBACK, CLIENT_ID, SECRET,
get_user_id(os.environ["CHANNEL"]), 'subscribe')
print('Subscribing to EventSub Cheer.\n')
subscribe_to_eventsub(APP_ACCESS_TOKEN, CALLBACK, CLIENT_ID, SECRET,
get_user_id(os.environ["CHANNEL"]), 'cheer')
# Unsubscribes the bot from all EventSub topics regardless of channel.
elif cmd == 'nukeeventsubs' and name in MODS:
print('Deleting all EventSub subscriptions.\n')
nuke_eventsubs(APP_ACCESS_TOKEN, CLIENT_ID)
# Disconnects the bot from Twitch chat, closes the database connection,
# and then performs the rest of the shut down tasks.
elif cmd == 'disconnect' and name in MODS:
dbconnection.close()
shut_down()
# Initiates the OIDC Authorization Code Flow process.
elif cmd == 'auth' and name in MODS:
os.environ['STATE'] = authorize(CALLBACK, CLIENT_ID)
else:
print(f'Command {cmd} is not a registered command, or {name} does '
'not have permission to use it, ignoring.\n')
# Define a function that deletes a command if it exists.
def delete_command(message: str, cursor: sqlite3.Cursor):
# Split the message into a list.
splitmsg = message.split(' ')
# Get just the command name.
command = splitmsg[1]
cursor.execute('DELETE FROM commands WHERE command = :command',
{'command': command})
print(f'Command {command} deleted.\n')
# Define a function to get a user ID specifically.
def get_user_id(user: str, auth: dict=AUTH) -> str:
data = get_user_data(user, auth)
user_id = ''
for i in data['users']:
user_id = str(i['_id'])
return user_id
# Define a function to join a chat channel.
def joinchan(chan: str=CHANNEL):
ircserver.send(bytes('JOIN {}\r\n'.format(chan), 'UTF-8'))
sendmsg('/me has joined the chat.')
# Define a function to post messages in chat.
def sendmsg(msg: str, target: str=CHANNEL):
ircserver.send(bytes('PRIVMSG {} :{}\r\n'.format(target, msg), 'UTF-8'))
# Define a function that shuts down the bot when called.
def shut_down():
print('Cancelling EventSubs and shutting down...\n')
nuke_eventsubs(APP_ACCESS_TOKEN, CLIENT_ID)
sendmsg('/me goes to sleep ResidentSleeper')
thread.join()
exit(0)
# Define the main function.
def main():
# Start the HTTP request handler.
thread.start()
# Connect to the bot's database and create a cursor.
dbconnection = sqlite3.connect(DB)
dbcursor = dbconnection.cursor()
# Join the IRC channel (chat).
joinchan()
# Schedule the seen users list-clearing task.
schedule.every().day.at('00:00').do(clear_seen_users)
while True:
schedule.run_pending()
ircmsg = ircserver.recv(2048).decode('UTF-8')
        ircmsg = ircmsg.strip('\r\n')
cmd = ''
name = ''
# Check the type of message received.
if ircmsg.find('PRIVMSG') != -1:
# strip() removes \n characters
name = ircmsg.split('!', 1)[0][1:].strip()
message = ircmsg.split('PRIVMSG', 1)[1].split(':', 1)[1].strip()
print(f'Message: {message}\n')
# If message starts with a !, indicating a bot command.
if message[0] == '!':
command(message, name, dbcursor, dbconnection)
# See if the user is saying hi.
elif any(word in message.lower() for word in GREETINGS):
# Say hi if the user has not been seen lately.
if name not in seen_users:
sendmsg('Hi {} :)'.format(name))
seen_users.append(name)
# Respond to ircserver pings to maintain connection.
elif ircmsg.find('PING') != -1:
ircserver.send(bytes('PONG :tmi.twitch.tv\r\n', 'UTF-8'))
print('Ping response sent.')
if __name__ == '__main__':
try:
# Start the chat bot.
main()
except KeyboardInterrupt:
shut_down()
|
optimizer_tcp_manager.py
|
import yaml
import os
import subprocess
import socket
import json
import logging
import time
import math
import pkg_resources
from threading import Thread
from retry import retry
from .solver_response import SolverResponse
class OptimizerTcpManager:
"""Client for TCP interface of parametric optimizers
This class is used to start and stop a TCP server, which
has been generated by <code>opengen</code>.
"""
def __init__(self, optimizer_path=None, ip=None, port=None):
"""Constructs instance of <code>OptimizerTcpManager</code>
        Args:
            optimizer_path: path to auto-generated optimizer (just to
                be clear: this is the folder that contains <code>optimizer.yml</code>)
            ip: IP address of a remote TCP server (used together with <code>port</code>
                when no <code>optimizer_path</code> is given)
            port: port of the remote TCP server
Returns:
New instance of <code>OptimizerTcpManager</code>
"""
self.__optimizer_path = optimizer_path
if optimizer_path is not None:
self.__optimizer_details_from_yml = None
self.__load_tcp_details()
elif ip is not None and port is not None:
self.__optimizer_details_from_yml = {"tcp": {"ip": ip, "port": port}}
else:
raise Exception("Illegal arguments")
        # Check whether the optimizer was built with the current version of opengen.
        # The 'build' section of optimizer.yml is only available when a local
        # optimizer path was provided.
        if self.__optimizer_path is not None:
            opengen_version = self.__optimizer_details_from_yml['build']['opengen_version']
            current_opengen_version = pkg_resources.require("opengen")[0].version
            if current_opengen_version != opengen_version:
                logging.warning('the target optimizer was built with a different version of opengen (%s)' % opengen_version)
                logging.warning('you are running opengen version %s' % current_opengen_version)
def __load_tcp_details(self):
logging.info("loading TCP/IP details")
yaml_file = os.path.join(self.__optimizer_path, "optimizer.yml")
with open(yaml_file, 'r') as stream:
self.__optimizer_details_from_yml = yaml.safe_load(stream)
details = self.__optimizer_details_from_yml
logging.info("TCP/IP details: %s:%d", details['tcp']['ip'], details['tcp']['port'])
def __threaded_start(self):
optimizer_details = self.__optimizer_details_from_yml
logging.info("Starting TCP/IP server at %s:%d (in a detached thread)",
optimizer_details['tcp']['ip'],
optimizer_details['tcp']['port'])
command = ['cargo', 'run', '-q']
if optimizer_details['build']['build_mode'] == 'release':
command.append('--release')
tcp_dir_name = "tcp_iface_" + optimizer_details['meta']['optimizer_name']
tcp_iface_directory = os.path.join(self.__optimizer_path, tcp_dir_name)
p = subprocess.Popen(command, cwd=tcp_iface_directory)
p.wait()
@retry(tries=10, delay=1)
def __obtain_socket_connection(self):
tcp_data = self.__optimizer_details_from_yml
ip = tcp_data['tcp']['ip']
port = tcp_data['tcp']['port']
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect((ip, port))
return s
def __send_receive_data(self, text_to_send, buffer_size=512, max_data_size=1048576):
conn_socket = self.__obtain_socket_connection()
encoded_data = text_to_send.encode()
conn_socket.sendall(encoded_data)
conn_socket.shutdown(socket.SHUT_WR)
max_read_rounds = math.ceil(max_data_size/buffer_size)
data = b''
for _i in range(max_read_rounds):
data_chunk = conn_socket.recv(buffer_size)
            # recv() returns an empty bytes object once the server closes the connection
            if not data_chunk:
                break
data += data_chunk
conn_socket.close()
return data.decode()
def ping(self):
"""Pings the server
Pings the server to check whether it is up and running
"""
request = '{"Ping":1}'
data = self.__send_receive_data(request)
return json.loads(data)
def __check_if_server_is_running(self):
tcp_data = self.__optimizer_details_from_yml
ip = tcp_data['tcp']['ip']
port = tcp_data['tcp']['port']
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
return 0 == s.connect_ex((ip, port))
def start(self):
"""Starts the TCP server"""
# start the server in a separate thread
if self.__optimizer_path is None:
raise Exception("No optimizer path provided - cannot start a remote server")
if self.__check_if_server_is_running():
msg = "Port %d not available" % self.__optimizer_details_from_yml['tcp']['port']
raise Exception(msg)
logging.info("Starting TCP/IP server thread")
thread = Thread(target=self.__threaded_start)
thread.start()
# ping the server until it responds so that we know it's
# up and running
logging.info("Waiting for server to start")
time.sleep(2)
self.ping()
def kill(self):
"""Kills the server"""
logging.info("Killing server")
request = '{"Kill":1}'
self.__send_receive_data(request)
def call(self, p, initial_guess=None,
initial_y=None,
initial_penalty=None,
buffer_len=4096,
max_data_size=1048576) -> SolverResponse:
"""Calls the server
Consumes the parametric optimizer by providing a parameter vector
and, optionally, an initial guess
Args:
p: vector of parameters (list of float)
initial_guess: initial guess vector (list of float)
initial_y: initial vector of Lagrange multipliers (list of float)
initial_penalty: initial penalty parameter (float)
buffer_len: buffer length used to read the server response
(default value: 4096)
max_data_size: maximum data size that is expected to be
received from the TCP server (default value: 1048576)
Returns:
Instance of SolverResponse
"""
# Make request
logging.debug("Sending request to TCP/IP server")
run_message = '{"Run" : {"parameter": ['
run_message += ','.join(map(str, p))
run_message += ']'
if initial_guess is not None:
run_message += ', "initial_guess": ['
run_message += ','.join(map(str, initial_guess))
run_message += ']'
if initial_y is not None:
run_message += ', "initial_lagrange_multipliers": ['
run_message += ','.join(map(str, initial_y))
run_message += ']'
if initial_penalty is not None:
run_message += ', "initial_penalty": ' + str(float(initial_penalty))
run_message += '}}'
data = self.__send_receive_data(run_message, buffer_len, max_data_size)
return SolverResponse(json.loads(data))
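# Illustrative usage sketch (the optimizer path and parameter vector below are
# hypothetical, not part of the original module): start the auto-generated TCP
# server, call it once, and shut it down.
def _example_tcp_manager_usage():
    mng = OptimizerTcpManager(optimizer_path='my_optimizers/rosenbrock')
    mng.start()                          # spawns `cargo run` and waits for a ping
    response = mng.call(p=[1.0, 50.0])   # SolverResponse wrapping the JSON reply
    mng.kill()
    return response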
|
pretty_progress.py
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
try:
from Queue import Empty # for python 2
except ImportError:
from queue import Empty # for python 3
from multiprocessing import Process, Queue
from os import environ
from os.path import join
from re import sub
from sys import stderr, stdout
from time import sleep
from timeit import default_timer as timer
from ansible.constants import COLOR_ERROR, COLOR_OK, COLOR_SKIP
from ansible.playbook import Playbook
from ansible.playbook.play import Play
from ansible.playbook.task import Task
from ansible.plugins.callback import CallbackBase
from backports.shutil_get_terminal_size import get_terminal_size
RUNNING_PREFIX = 'RUNNING'
SUCCESS_PREFIX = 'SUCCESS'
FAILURE_PREFIX = 'FAILURE'
IGNORED_PREFIX = 'IGNORED'
ERRORED_PREFIX = 'ERRORED'
SKIPPED_PREFIX = 'SKIPPED'
# poll at 20 Hz
POLL_DURATION = 0.05
# we need a reasonable amount of width but we do not
# want to take up more than the width of a single line
# a line will look like:
# PREFIX | IDENT [DETAILS DETAILS ...] ------ [TIMESTAMP]
OUTPUT_WIDTH = min(get_terminal_size().columns, 150)
prefix_width = 7 # e.g. `SUCCESS`
prefix_separator_width = 3 # e.g. ` | `
name_padding_width = 1 # e.g. ` ` after name
time_padding_width = 2 # `- ` before time
time_width = 11 # e.g. `[00:00.000]`
IDENTIFIER_WIDTH = OUTPUT_WIDTH - \
prefix_width - prefix_separator_width - \
name_padding_width - \
time_padding_width - time_width
# TODO: determine if there is a better way
MOVE_UP_ONE_LINE = b'\033[F'
CLEAR_LINE = b'\033[K'
def display_workload(queue):
"""
Async worker to display the workloads as fed
by the queue. Will attempt to fetch new data
from the queue at 20Hz, but failing that, it
will re-render and display the last seen data
to keep the refresh rate at or above 20Hz.
:param queue: queue to consume data from
"""
last_workload = []
last_num_lines = 0
while True:
try:
workload = queue.get(timeout=POLL_DURATION)
except Empty:
workload = last_workload
# navigate to the top to over-write the last output
for i in range(last_num_lines):
try: # for python3
stdout.buffer.write(MOVE_UP_ONE_LINE)
except AttributeError: # for python2
stdout.write(MOVE_UP_ONE_LINE)
if i < last_num_lines - len(workload):
# if there are lines in the old output which
# we may not overwrite, just clear them
try: # for python3
stdout.buffer.write(CLEAR_LINE)
except AttributeError: # for python2
stdout.write(CLEAR_LINE)
# re-render and print the new output
last_num_lines = 0
for item in workload:
last_num_lines += item.render()
last_workload = workload
class CallbackModule(CallbackBase):
"""
This module allows for CLI-based Ansible invocations
to have nicely formatted output that cleanly indicates
to the users what their CLI call is doing and how far
along it is in the process.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'pretty_progress'
def __init__(self, *args, **kwargs):
# a container for the workloads we are tracking
self._workloads = []
# set up for the async worker we use to update
# the screen at a rate more frequent than that
# which we get callbacks at
self._queue = Queue()
self._worker = Process(target=display_workload, args=(self._queue, ))
# ensure that the worker thread is reaped if
# the main thread dies by marking the thread
# as a daemon
self._worker.daemon = True
self._worker.start()
super(CallbackModule, self).__init__(*args, **kwargs)
def update_last_workload(self, status):
"""
Update the last workload to complete and send
the updated list of workloads to the consumer.
:param status: status to update to
"""
self._workloads[-1].complete(status)
def finalize_last_play(self, status):
"""
Update the last play to be complete and remove
any trace of the tasks that were displayed for it
while it was running if it succeeded.
:param status: status to update to
"""
last_play_index = -1
for i, workload in reversed(list(enumerate(self._workloads))):
if 'PLAY [' in workload.identifier:
last_play_index = i
break
if last_play_index != -1:
# we are called on play start, as there is no
# callback hook for play end, so if we are called
# on the start of the first play, there will be
# no previous play to update
if status == SUCCESS_PREFIX:
# if we succeeded, nobody cares what tasks ran,
# so we can hide them; otherwise, we want the
# users to see the failed tasks
self._workloads = self._workloads[:last_play_index + 1]
self._workloads[last_play_index].complete(status)
def finalize_playbook(self, status):
"""
Update the playbook and last play status to
reflect the end of execution.
:param status: status we want to update to
"""
self.finalize_last_play(status)
# we only ever run one playbook before we
# reset the internal state, so we can assume
# that there is only one playbook in the
# list of workloads, and that it is the first
# item in the list
self._workloads[0].complete(status)
def v2_playbook_on_start(self, playbook):
"""
Implementation of the callback endpoint to be
fired when execution of a new playbook begins.
We know that we will only ever run one playbook
at a time, so we take some liberties with this:
- we don't attempt to update the current
workload state as we assume it is empty
:param playbook: playbook that just started
"""
self._workloads.append(Workload(playbook))
def v2_playbook_on_play_start(self, play):
"""
Implementation of the callback endpoint to be
fired when execution of a new play begins. We
need to clean up the last play before we add
the new one to the workloads.
:param play: play that just started
"""
self.finalize_last_play(SUCCESS_PREFIX)
self._workloads.append(Workload(play))
def v2_playbook_on_task_start(self, task, is_conditional):
"""
Implementation of the callback endpoint to be
fired when execution of a new task begins. We
only keep track of the last running task, so
if there is already a task displayed for the
current play we over-write it.
:param task: task that just started
:param is_conditional: if the task is conditional
"""
if 'TASK' in self._workloads[-1].identifier:
self._workloads[-1] = Workload(task)
else:
self._workloads.append(Workload(task))
def v2_runner_on_ok(self, result):
"""
Implementation of the callback endpoint to be
fired when a task finishes executing successfully.
We assume that the last workload is the last task.
:param result: result of the last task
"""
self.update_last_workload(SUCCESS_PREFIX)
def v2_runner_on_failed(self, result, ignore_errors=False):
"""
Implementation of the callback endpoint to be
fired when a task fails to execute successfully.
If we are not ignoring errors, we will not only
show the task as failed, but also add the error
information to the output stream.
:param result: result of the last task
:param ignore_errors: if we should consider this a failure
"""
status = IGNORED_PREFIX if ignore_errors else FAILURE_PREFIX
self.update_last_workload(status)
if not ignore_errors:
self._workloads.append(Failure(result))
def v2_runner_on_unreachable(self, result):
"""
Implementation of the callback endpoint to be
        fired when a task can't reach its target host.
We will show the task as errored and append the
error information to the output stream.
:param result: result of the last task
"""
self.update_last_workload(ERRORED_PREFIX)
self._workloads.append(Failure(result))
def v2_runner_on_skipped(self, result):
"""
Implementation of the callback endpoint to be
fired when task execution is skipped.
:param result: result of the last task
"""
self.update_last_workload(SKIPPED_PREFIX)
def v2_on_any(self, *args, **kwargs):
"""
Implementation of the callback endpoint to be
fired *after* any other callback is fired. We
know that if a callback happened, it could have
changed the state of the workloads we track,
so we send the updated state to the consumer
thread after any callback. If the callback did
not change the internal state, the consumer
will just refresh faster than normal, which is
        not a problem. We also will trigger after the
`v2_playbook_on_stats` endpoint, after which
we will not have anyone listening to the other
end of the queue, but we will also be cleaning
up and exiting soon anyway, so again it is not
an issue.
:param args: arguments [ignored]
:param kwargs: keyword arguments [ignored]
"""
self._queue.put(self._workloads)
def v2_playbook_on_stats(self, stats):
"""
Implementation of the callback endpoint to be
fired when a playbook is finished. As we are
only running one playbook at a time, we can
again make some assumptions about what to do
here. Specifically:
- we can assume the last playbook that ran is
the first item in our workload queue
- we can clean up the worker thread as there
will be no more tasks running after this
        :param stats: aggregated statistics for the completed playbook run
"""
# there isn't a good API for determining failures,
# so we need to search for them ourselves
status = SUCCESS_PREFIX
# task failures are recorded per host per type of
# failure, so we need to check that any hosts in
# these sections have occurrences of the failure
# recorded
for host in stats.dark:
if stats.dark[host] > 0:
# tasks failed to reach their host
status = FAILURE_PREFIX
break
for host in stats.failures:
if stats.failures[host] > 0:
# tasks failed to execute
status = FAILURE_PREFIX
break
self.finalize_playbook(status)
# we need to manually trigger this queue update
self._queue.put(self._workloads)
# wait for consumer to post everything we have
while not self._queue.empty():
sleep(POLL_DURATION)
# we are using the multiprocessing queue, which
# does not implement join() and task_done(), so
# we cannot reliably know that the consumer has
# worked on the last element when we see that the
# queue is empty on our end. No implementation
# exists with a peek(), either, so we just have
# to wait for one timeout iteration here and
# hope for the best.
sleep(POLL_DURATION)
self._worker.terminate()
self._worker.join()
def format_identifier(workload):
"""
Determine an identifier for the workload.
:param workload: workload to identify
:return: identifier for the workload
"""
if isinstance(workload, Playbook):
# unfortunately there is no nice way to self-
# identify for a playbook, so we must access
# a protected member. Furthermore, we do not
# necessarily need the full path to the play-
# book and we can live with the relative path
# from the origin-ci-tool root.
# TODO: do this with os.path?
return 'PLAYBOOK [{}]'.format('origin-ci-tool{}'.format(sub('^.*origin-ci-tool', '', workload._file_name)))
elif isinstance(workload, Play):
return 'PLAY [{}]'.format(workload.get_name())
elif isinstance(workload, Task):
return 'TASK [{}]'.format(workload.get_name())
else:
return 'UNKNOWN'
def format_status(status):
"""
Format the status of a workload, with
colors where appropriate.
:param status: status prefix
:return: formatted status
"""
color = 'normal'
if status == SUCCESS_PREFIX:
color = COLOR_OK
elif status == FAILURE_PREFIX or status == ERRORED_PREFIX:
color = COLOR_ERROR
elif status == IGNORED_PREFIX or status == SKIPPED_PREFIX:
color = COLOR_SKIP
return colorize(status, color=color)
class Workload(object):
"""
A wrapper for an Ansible workload like a play,
playbook, task, etc. that knows how to display
information about the workload in a pretty way.
"""
def __init__(self, workload):
"""
Create a Workload wrapper for an Ansible workload.
:param workload: a play, playbook, task, etc.
"""
self.identifier = format_identifier(workload)
self.status = RUNNING_PREFIX
self.start_time = timer()
# to be set when we finish this workload
self.elapsed_time = None
def __str__(self):
return self.format()
def complete(self, status):
"""
Mark the workload as having been completed.
:param status: new status to update to
"""
self.status = format_status(status)
self.elapsed_time = self.format_runtime()
def render(self):
"""
Render a representation of this workload onto
the screen using stdout.
:return: number of lines written
"""
stdout.write(self.format())
return 1
def format(self):
"""
Format a string containing:
PREFIX | NAME -------------------- [TIME]
Where PREFIX is one of the above constants,
NAME is an identifier for the Ansible play
or playbook being run, and time is a time-
stamp with format MM:SS.SSS.
We will truncate the name so that everything
fits in the allotted width. If the name is
not going to fit, we will append an ellipsis.
:return: formatted self-representation
"""
if len(self.identifier) > IDENTIFIER_WIDTH:
self.identifier = '{}...'.format(self.identifier[:IDENTIFIER_WIDTH - 3])
fill_width = IDENTIFIER_WIDTH - len(self.identifier)
return '{} | {} -{} [{}]\n'.format(
self.status,
self.identifier,
'-' * fill_width,
self.format_runtime(),
)
def format_runtime(self):
"""
Format the current running time of this
Workload as a nice string like MM:SS.SSS.
:return: formatted time
"""
if self.elapsed_time:
return self.elapsed_time
else:
return '{:02.0f}:{:06.3f}'.format(*divmod(timer() - self.start_time, 60))
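# Illustrative example of Workload.format_runtime(): 83.2 seconds elapsed gives
# divmod(83.2, 60) == (1.0, 23.2), which formats as '01:23.200'.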
def format_result(result):
"""
Attempt to extract and format information
about an Ansible workload result.
:param result: result to inspect
:return: message
"""
full_message = format_failure_message(result)
full_message += format_item_failures(result)
full_message += format_terminal_output(result)
# detect internal module failures
full_message += format_terminal_output(result, stdout_key='module_stdout', stderr_key='module_stderr')
# detect internal stacktrace crashes
full_message += format_internal_exception_output(result)
full_message += format_parsing_error(result)
# filter out empty lines and lines of only whitespace
full_message = [line for line in full_message.splitlines() if line and line.strip()]
return "\n".join(full_message) + "\n"
def format_failure_message(result):
"""
Output a formatted version of the failure
message, if the result contains one.
:param result: result to inspect
:return: message
"""
if 'msg' in result:
# this is most likely a module failure
if isinstance(result['msg'], list):
error_message = '\n'.join(result['msg'])
else:
error_message = result['msg']
return '{}\n'.format(error_message)
return ''
def format_item_failures(result):
"""
Output a formatted version of the item
failures, if the result contains any.
:param result: result to inspect
:return: message
"""
if 'results' in result:
# this is most likely a failure from with_items
item_preamble = 'The following error messages came from items:'
item_messages = []
for item_result in result['results']:
# the item could possibly contain any
# valid result output, as any Ansible
# workload can be looped over
item_messages.append(format_result(item_result))
item_messages = [message for message in item_messages if len(message) > 0]
if len(item_messages) > 0:
return '{}\n{}'.format(item_preamble, '\n'.join(item_messages))
return ''
def format_terminal_output(result, stdout_key='stdout', stderr_key='stderr'):
"""
Output a formatted version of the terminal
output (std{out,err}), if the result contains
either.
:param stdout_key: where stdout is recorded
:param stderr_key: where stderr is recorded
:param result: result to inspect
:return: formatted output message
"""
output_message = ''
if stdout_key in result:
# this is most likely a shell/command/raw failure
if len(result[stdout_key]) > 0:
output_message += '{}\n{}\n'.format('Output to stdout:', result[stdout_key])
if stderr_key in result:
if len(result[stderr_key]) > 0:
output_message += '{}\n{}\n'.format(colorize('Output to stderr:', color=COLOR_ERROR), result[stderr_key])
if stdout_key in result and len(result[stdout_key]) == 0 and stderr_key in result and len(result[stderr_key]) == 0:
output_message = colorize('No output was written to stdout or stderr!', color=COLOR_ERROR)
return output_message
def format_internal_exception_output(result):
"""
Output a formatted version of any internal
errors that Ansible runs into when executing,
if any are present.
:param result: result to inspect
:return: formatted output message
"""
if 'exception' in result:
return 'An internal exception occurred:\n{}'.format(result['exception'])
return ''
def format_parsing_error(result):
"""
Output a formatted version of any parsing
errors that Ansible runs into when looking
at a playbook, if any are present.
:param result: result to inspect
:return: formatted output message
"""
if 'reason' in result:
return 'Parsing the playbook failed:\n{}'.format(result['reason'])
return ''
class Failure(object):
"""
Holds information about a failure that happened
and can render itself onto the screen.
"""
def __init__(self, result):
self.identifier = 'FAILURE'
self.result = result._result
self.host = result._host
def __str__(self):
return self.format()
def render(self):
"""
Render a representation of this failure
onto the terminal screen.
:return: number of lines written
"""
full_message = self.format()
stderr.write(full_message)
return full_message.count('\n')
def format(self):
"""
Format the failure result nicely.
:return: the formatted error
"""
full_message = colorize('A task failed on host `{}`!\n'.format(self.host), color=COLOR_ERROR)
result = format_result(self.result)
        if not result.strip():
# we have not been able to get any use-able
# messages from the result, so we should
# tell the user to look at the logs
# TODO: better OS-agnostic filesystem code for this
            log_location = join(environ.get('ANSIBLE_LOG_ROOT_PATH', join('tmp', 'ansible', 'log')), '{}'.format(self.host))
full_message += 'No useful error messages could be extracted, see full output at {}\n'.format(log_location)
else:
full_message += result
return full_message
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#
# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
#
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
codeCodes = {
'black': '0;30',
'bright gray': '0;37',
'blue': '0;34',
'white': '1;37',
'green': '0;32',
'bright blue': '1;34',
'cyan': '0;36',
'bright green': '1;32',
'red': '0;31',
'bright cyan': '1;36',
'purple': '0;35',
'bright red': '1;31',
'yellow': '0;33',
'bright purple': '1;35',
'dark gray': '1;30',
'bright yellow': '1;33',
'magenta': '0;35',
'bright magenta': '1;35',
'normal': '0',
}
def colorize(text, color):
"""String in color."""
return u"\033[%sm%s\033[0m" % (codeCodes[color], text)
# --- end "pretty"
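# Illustrative usage of the "pretty" helpers above (not part of the original
# plugin; it only runs when this file is executed directly):
if __name__ == '__main__':
    print(colorize('status: ok', color='green'))
    print(colorize('status: failed', color='bright red'))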
|
computersinger.py
|
'''
Function:
    Make the buzzer on the PC motherboard hum a song
Author:
    Charles
WeChat Official Account:
    Charles的皮卡丘
'''
import os
import sys
import time
import threading
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
'''Make the buzzer on the PC motherboard hum a song'''
class ComputerSinger(QWidget):
tool_name = '让电脑主板上的蜂鸣器哼歌'
def __init__(self, parent=None, title='让电脑主板上的蜂鸣器哼歌 —— Charles的皮卡丘', **kwargs):
super(ComputerSinger, self).__init__(parent)
import ctypes
rootdir = os.path.split(os.path.abspath(__file__))[0]
self.rootdir = rootdir
self.setFixedSize(500, 100)
self.setWindowTitle(title)
self.setWindowIcon(QIcon(os.path.join(rootdir, 'resources/icon.ico')))
self.grid = QGridLayout()
        # Define the required widgets
        # --label
        self.musicfilepath_label = QLabel('音乐简谱路径:')
        # --line edit for the score file path
        self.musicfilepath_edit = QLineEdit(os.path.join(rootdir, 'resources/musicfiles/小幸运'))
        # --buttons
self.choose_button = QPushButton('选择')
self.play_button = QPushButton('播放')
        # Layout
self.grid.addWidget(self.musicfilepath_label, 0, 0, 1, 1)
self.grid.addWidget(self.musicfilepath_edit, 0, 1, 1, 4)
self.grid.addWidget(self.choose_button, 1, 3, 1, 1)
self.grid.addWidget(self.play_button, 1, 4, 1, 1)
self.setLayout(self.grid)
        # Signal/slot bindings
self.choose_button.clicked.connect(self.openfile)
self.play_button.clicked.connect(lambda _: threading.Thread(target=self.play).start())
        # A few constants
self.pitchs_dict = {'l': 0.5, 'm': 1., 'h': 2.}
self.tone2freq_dict = {'C': 523, 'D': 587, 'E': 659, 'F': 698, 'G': 784, 'A': 880, 'B': 988}
self.tone_scale = 1.06
self.beats = 1000 * 60 / 65
self.beep_player = ctypes.windll.kernel32
    '''Open a score file'''
def openfile(self):
filepath = QFileDialog.getOpenFileName(self, '请选取音乐简谱', self.rootdir)
self.musicfilepath_edit.setText(filepath[0])
    '''Parse the numbered-notation score file'''
    def parse(self, filepath):
        with open(filepath, 'r') as f:
            song_info = f.read().replace('\n', '').split(',')
        tone = song_info[0]
        song_info = song_info[1:]
        return tone, song_info
    '''Play the parsed score through the motherboard buzzer'''
def play(self):
filepath = self.musicfilepath_edit.text()
if not os.path.isfile(filepath):
return
tone, song_info = self.parse(filepath)
do = self.tone2freq_dict[tone]
re = int(do * self.tone_scale * self.tone_scale)
mi = int(re * self.tone_scale * self.tone_scale)
fa = int(mi * self.tone_scale * self.tone_scale)
sol = int(fa * self.tone_scale * self.tone_scale)
la = int(sol * self.tone_scale * self.tone_scale)
si = int(la * self.tone_scale * self.tone_scale)
notes = [0, do, re, mi, fa, sol, la, si]
for item in song_info:
if notes[int(item[0])] == 0:
time.sleep(self.beats / 1000)
else:
self.beep_player.Beep(int(notes[int(item[0])]*self.pitchs_dict[item[1]]), int(self.beats * float(item[2:])))
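# Score file format as inferred from parse()/play() above (illustrative only; the
# bundled files under resources/musicfiles are not shown here): a single comma-separated
# line whose first field is the key (C/D/E/F/G/A/B) and whose remaining fields look like
# '1m1' -- scale degree 1-7 (0 = rest), register 'l'/'m'/'h' (low/middle/high octave),
# then a beat multiplier, e.g. 'C,1m1,5m0.5,0l1'.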
|
main.py
|
import cv2, difflib, re, sys, time, snap7, threading, os, Img, subprocess
import numpy as np
from PyQt6 import QtWidgets, QtCore, QtGui
from paddleocr import PaddleOCR, draw_ocr
from style import Ui_mainWindow
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
        self.ui = Ui_mainWindow()  # UI layout generated for the front end
        self.ui.setupUi(self)
        # Shared state
        self.ppocr = None  # OCR model
        self.show = None  # frame pre-processed with cv2
        self.showend = None  # annotated output image
        self.midimg = None  # output image before annotation
        self.testtarget = ""  # expected inspection target entered by the user
        self.historylist = []  # history records
        self.save = ""  # result line to append to the history
        self.com_out_ans_bool = 0  # inspection result code
        self.thetime = None  # clock text shown on the UI
        self.hashmark = [0, 0, 0, 0, 0, 0, 0]
        self.hashans = [0, 0, 0, 0, 0, 0, 0]
        self.bad_str = ''
        # State flags
        self.readly_check = False  # ready to inspect
        self.checking = False  # inspection in progress
        self.need_check = False  # inspection requested
        self.need_clean = False  # alarm reset requested
        self.need_save_bool = False  # settings need to be saved
        self.need_freshen = False  # UI output needs refreshing
        self.need_com_freshen = False  # HMI/PLC output needs refreshing
        self.need_learn = False  # template needs to be re-learned
        # Tuning parameters
        self.img_colour_mod = 1  # image mode (1 grey / 2 binary / 3 template / 4 colour)
        self.img_mod = []
        self.img_blus_mod = '0'  # filter mode
        self.img_colour2_mod = 0  # colour-image processing
        self.img_gray_mod = 0  # histogram processing mode
        self.usecn = 0  # allow Chinese characters
        self.usechar = 0  # allow punctuation characters
        self.opentime = 0  # erosion iterations
        self.closetime = 0  # dilation iterations
        self.jdtime = 3  # adaptive-mean block size
        self.lbtime = 5  # adaptive-mean constant
        self.jd2time = 3  # colour-extraction tolerance
        self.lb2time = 5  # colour-extraction threshold
        self.maxbad = 0  # allowed number of bad characters
        self.maxmark = 95  # minimum confidence (%)
        self.com_out_bad = 0  # number of bad characters reported
        self.bad_mark = 0  # rolling bad-result score
        # Communication variables
        self.com_out = 0  # status byte written to the PLC
        '''vb5000>vb5001
        Q0.0 1.  power on
        Q0.1 2.  ready to inspect
        Q0.2 4.  critical alarm
        Q0.3 8.  result 1: pass
        Q0.4 16. result 2: reject
'''
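        # Worked example of the status byte (see iq_count() below): power on (1)
        # + ready to inspect (2) + result "pass" (8) gives com_out == 11; a critical
        # alarm with nothing else set gives 1 + 4 == 5.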
        self.com_in = None  # command byte read from the PLC
        self.com_in_en = False
        '''vb5002
        I0.0 1. enable
        I0.1 2. trigger inspection
        I0.2 4. trigger learning (unused)
        I0.3 8. clear device alarm
        '''
        self.badnum = [1, 1, 0, 1, 0, 0, 0, 0]  # error flags
        self.com_bad = 0
        '''vb5003
        0. no error
        Q1.0 1.   model still initialising
        Q1.1 2.   camera connection lost (hardware)
        Q1.2 4.
        Q1.3 8.   PLC communication lost
        Q1.4 16.  too many rejects in a short period (warning)
        Q1.5 32.  no inspection target configured
        Q1.6 64.
        Q1.7 128.
        '''
        self.com_out_time = 0  # duration of the last inspection in ms
        self.com_out_optimize = ''  # optimisation advice text shown to the operator
        '''vw5004
        int
        '''
        self.com_out_ans_len = 0  # length of the result string
        '''vw5006
        vw5005: int, length of the result string
        '''
        self.com_out_fullans = ''  # full result string
        self.com_out_ans = ''  # result string without newlines or Chinese characters
        '''vb5008-vb5108
        string
        vb5008-vb5108: ASCII characters (no Chinese)
        '''
        self.com_out_mark = 0  # confidence (%)
        # Indicator colours currently shown on the UI
        self.colour_en = False
        self.colour_check = False
        self.colour_clean = False
        self.colour_readly = False
        self.colour_bad = False
        self.colour_str = False
        # Initialisation
        self.doit()  # wire up button events
        self.startset()  # load saved settings and apply them
        self.time_mod = threading.Thread(target=self.mod_start)  # model initialisation
        self.time_mod.daemon = True
        self.time_mod.start()
        self.cap = cv2.VideoCapture()
        # set the frame width to 640
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        # set the frame height to 480
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.cap.open(0)
        self.time_com_camera = QtCore.QTimer(self)
        self.time_com_camera.timeout.connect(self.show_camera)  # frame grabbing
        self.time_com_camera.start(30)
        self.plc = snap7.client.Client()  # PLC communication
        self.plc.set_connection_type(3)
        self.time_com = threading.Thread(target=self.plc_com)  # communication thread
        self.time_com.daemon = True
        self.time_com.start()
        self.time_com_iq = QtCore.QTimer(self)
        self.time_com_iq.timeout.connect(self.iq_count)  # UI data refresh
        self.time_com_iq.start(20)
        self.time_main = threading.Thread(target=self.plcdo)  # worker thread
        self.time_main.daemon = True
        self.time_main.start()
        self.time_clock = threading.Thread(target=self.clock)  # clock thread
        self.time_clock.daemon = True
        self.time_clock.start()
def mod_start(self):
"""
模型初始化
"""
self.ppocr = PaddleOCR(
use_gpu=True, # 使用gpu
cls=True, # 角度分类
det_limit_side_len=320, # 检测算法前向时图片长边的最大尺寸,
det_limit_type='max', # 限制输入图片的大小,可选参数为limit_type[max, min] 一般设置为 32 的倍数,如 960。
ir_optim=False,
use_fp16=False, # 16位半精度
use_tensorrt=False, # 使用张量
gpu_mem=6000, # 初始化占用的GPU内存大小
cpu_threads=20,
enable_mkldnn=True, # 是否启用mkldnn
max_batch_size=512, # 图片尺寸最大大小
cls_model_dir='data/mod/2.3.0.1/ocr/cls/ch_ppocr_mobile_v2.0_cls_infer',
# cls模型位置
# image_dir="", # 通过命令行调用时间执行预测的图片或文件夹路径
det_algorithm='DB', # 使用的检测算法类型DB/EAST
det_model_dir='data/mod/2.3.0.1/ocr/det/ch/ch_PP-OCRv2_det_infer',
# 检测模型所在文件夹。传参方式有两种,1. None: 自动下载内置模型到 ~/.paddleocr/det;2.自己转换好的inference模型路径,模型路径下必须包含model和params文件
# DB(还有east,SAST)
det_db_thresh=0.3, # DB模型输出预测图的二值化阈值
det_db_box_thresh=0.6, # DB模型输出框的阈值,低于此值的预测框会被丢弃
det_db_unclip_ratio=1.5, # DB模型输出框扩大的比例
use_dilation=True, #缩放图片
det_db_score_mode="fast", #计算分数模式,fast对应原始的rectangle方式,slow对应polygon方式。
# 文本识别器的参数
rec_algorithm='CRNN', # 使用的识别算法类型
rec_model_dir='data/mod/2.3.0.1/ocr/rec/ch/ch_PP-OCRv2_rec_infer',
# 识别模型所在文件夹。传承那方式有两种,1. None: 自动下载内置模型到 ~/.paddleocr/rec;2.自己转换好的inference模型路径,模型路径下必须包含model和params文件
rec_image_shape="3,32,320", # 识别算法的输入图片尺寸
cls_batch_num=36, #
cls_thresh=0.9, #
lang='ch', # 语言
det=True, # 检测文字位置
rec=True, # 识别文字内容
use_angle_cls=False, # 识别竖排文字
rec_batch_num=36, # 进行识别时,同时前向的图片数
max_text_length=25, # 识别算法能识别的最大文字长度
rec_char_dict_path='data/mod/2.3.0.1/ppocr_keys_v1.txt', # 识别模型字典路径,当rec_model_dir使用方式2传参时需要修改为自己的字典路径
use_space_char=True, # 是否识别空格
)
self.badnum[0] = 0
def plc_com(self):
"""
通讯线程
"""
while 1:
if self.plc.get_connected():
try:
self.badnum[3] = 0
                    # write the output bytes
                    self.plc.write_area(snap7.types.Areas.DB, 1, 5000, bytearray([self.com_out]))
                    self.plc.write_area(snap7.types.Areas.DB, 1, 5003, bytearray([self.com_bad]))
                    # read the input signal byte
self.com_in = self.plc.read_area(snap7.types.Areas.DB, 1, 5002, 1)
comin = self.com_in[0]
if comin % 2 == 1:
self.com_in_en = True
comin -= 1
if comin >= 8:
self.need_clean = True
comin -= 8
if comin == 2 and self.checking == False:
self.need_check = True
else:
self.com_in_en = False
self.plc.write_area(snap7.types.Areas.DB, 1, 5002, bytearray(b'\x00'))
                    # push the refreshed result values to the PLC
if self.need_com_freshen:
dw = bytearray()
le = bytearray()
snap7.util.set_int(dw, 256, self.com_out_time)
self.plc.write_area(snap7.types.Areas.DB, 1, 5004, dw)
snap7.util.set_int(le, 256, self.com_out_ans_len)
self.plc.write_area(snap7.types.Areas.DB, 1, 5006, le)
x = re.sub("[年]", "(N)", self.com_out_ans)
x = re.sub("[月]", "(Y)", x)
x = re.sub("[日]", "(R)", x)
data = bytearray(100)
snap7.util.set_string(data, 0, x, 255)
self.plc.write_area(snap7.types.Areas.DB, 1, 5008, data)
                        self.need_com_freshen = False
                except Exception:
                    self.plc.disconnect()
                    self.com_in_en = False
                    self.badnum[3] = 1
else:
self.com_in_en = False
try:
self.plc.connect("192.168.2.1", 0, 1)
except:
self.badnum[3] = 1
time.sleep(0.0001)
def iq_count(self):
"""
信号统计
"""
# 准备检测
if self.badnum[0:2] == [0, 0] and self.checking == False:
self.readly_check = True
else:
self.readly_check = False
        # alarm reset
if self.need_clean:
self.bad_mark = 0
self.badnum[4] = 0
self.need_clean = False
self.com_out_optimize = '无'
        # alarm bookkeeping
if self.testtarget == '':
self.badnum[5] = 1
else:
self.badnum[5] = 0
if self.bad_mark > 3:
self.badnum[4] = 1
else:
self.badnum[4] = 0
        # pack the error-code byte
j = 0
for i in range(7):
if self.badnum[i] == 1:
j += 2 ** i
self.com_bad = j
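        # e.g. with only badnum[1] (camera lost) and badnum[3] (PLC link down) set,
        # the packed error code is 2 + 8 == 10.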
        # pack the output status byte
q = 1
if self.readly_check: q += 2
if self.badnum[0:3] != [0, 0, 0]: q += 4
if self.com_out_ans_bool == 1:
q += 8
elif self.com_out_ans_bool == 2:
q += 16
self.com_out = q
        # refresh the indicator lights
if self.readly_check != self.colour_readly:
self.colour_readly = self.readly_check
if self.readly_check:
self.ui.label__com_out_readly.setStyleSheet('border-radius: 10px; background-color: rgb(0, 250, 0);')
else:
self.ui.label__com_out_readly.setStyleSheet('border-radius: 10px; background-color: red;')
if (self.badnum[0:4] != [0, 0, 0, 0]) != self.colour_bad:
self.colour_bad = self.badnum[0:4] != [0, 0, 0, 0]
if self.colour_bad:
self.ui.label_com_out_bad.setStyleSheet('border-radius: 10px; background-color: rgb(0, 250, 0);')
else:
self.ui.label_com_out_bad.setStyleSheet('border-radius: 10px; background-color: red;')
if self.com_in_en != self.colour_en:
self.colour_en = self.com_in_en
if self.com_in_en:
self.ui.label_com_in_en.setStyleSheet('border-radius: 10px; background-color: rgb(0, 250, 0);')
else:
self.ui.label_com_in_en.setStyleSheet('border-radius: 10px; background-color: red;')
if self.need_check != self.colour_check:
self.colour_check = self.need_check
if self.need_check:
self.ui.label_com_in_do.setStyleSheet('border-radius: 10px; background-color: rgb(0, 250, 0);')
else:
self.ui.label_com_in_do.setStyleSheet('border-radius: 10px; background-color: red;')
if self.need_clean != self.colour_clean:
self.colour_clean = self.need_clean
if self.need_clean:
self.ui.label_com_in_clean.setStyleSheet('border-radius: 10px; background-color: rgb(0, 250, 0);')
else:
self.ui.label_com_in_clean.setStyleSheet('border-radius: 10px; background-color: red;')
srt = ''
if self.badnum[0:6] == [0, 0, 0, 0, 0, 0]:
colour = True
srt = '设备运行正常'
else:
colour = False
if self.badnum[0] == 1:
srt += '等待模型初始化\n'
if self.badnum[1] == 1:
srt += '摄像头连接中断\n'
if self.badnum[3] == 1:
srt += 'PLC通讯断开\n'
if self.badnum[4] == 1:
srt += '短期内不良数过多\n'
if self.badnum[5] == 1:
srt += '未设置检测目标基准\n'
self.need_learn = True
self.ui.label_bad_str.setText(srt)
if self.colour_str != colour:
self.colour_str = colour
if self.colour_str:
self.ui.label_bad_str.setStyleSheet('color: rgb(0, 200, 0);')
else:
self.ui.label_bad_str.setStyleSheet('color: rgb(255, 0, 0);')
        # refresh the result panel
if self.need_freshen:
self.freshen_interface()
self.need_freshen = False
def clock(self):
"""
时钟
"""
while 1:
self.thetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
self.ui.label_time.setText(str(self.thetime))
time.sleep(1)
def show_camera(self):
"""
摄像头图像传输(线程1)
"""
if self.cap.isOpened() == True:
self.badnum[1] = 0
flag, img0 = self.cap.read()
img1 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
            # unprocessed preview image
showImage = QtGui.QImage(img1.data, img1.shape[1], img1.shape[0], QtGui.QImage.Format.Format_RGB888)
self.show = img0
self.ui.label_11.setPixmap(QtGui.QPixmap.fromImage(showImage))
else:
self.badnum[1] = 1
def doit(self):
"""
按钮槽函数写入
"""
self.ui.butmain.clicked.connect(self.butmain)
self.ui.buthelp.clicked.connect(self.buthelp)
self.ui.but.clicked.connect(self.butdo)
self.ui.but_1.clicked.connect(self.tab_1)
self.ui.but_2.clicked.connect(self.tab_2)
self.ui.but_3.clicked.connect(self.butclean)
        # on-screen keyboard
self.ui.Button0.clicked.connect(self.button0)
self.ui.Button1.clicked.connect(self.button1)
self.ui.Button2.clicked.connect(self.button2)
self.ui.Button3.clicked.connect(self.button3)
self.ui.Button4.clicked.connect(self.button4)
self.ui.Button5.clicked.connect(self.button5)
self.ui.Button6.clicked.connect(self.button6)
self.ui.Button7.clicked.connect(self.button7)
self.ui.Button8.clicked.connect(self.button8)
self.ui.Button9.clicked.connect(self.button9)
self.ui.Buttonback.clicked.connect(self.buttonback)
self.ui.Buttonc.clicked.connect(self.buttonc)
self.ui.Buttond.clicked.connect(self.buttond)
self.ui.Buttondd.clicked.connect(self.buttondd)
self.ui.Buttonent.clicked.connect(self.buttonent)
self.ui.Buttoni.clicked.connect(self.buttoni)
self.ui.Buttonl.clicked.connect(self.buttonl)
self.ui.Buttonyear.clicked.connect(self.buttoyear)
self.ui.Buttonmonth.clicked.connect(self.buttonmonth)
self.ui.Buttonday.clicked.connect(self.buttonday)
self.ui.pushButton_4.clicked.connect(self.add)
self.ui.pushButton_5.clicked.connect(self.dec)
self.ui.radioButtongray.clicked.connect(self.radioButtongray_check)
self.ui.radioButtonmould.clicked.connect(self.radioButtonmould_check)
self.ui.radioButtonblack.clicked.connect(self.radioButtonblack_check)
self.ui.radioButtoncolor.clicked.connect(self.radioButtoncolor_check)
self.ui.radioButton_colorhis.clicked.connect(self.radioButton_colorhis_check)
self.ui.radioButtongauss.clicked.connect(self.radioButton_check_end)
self.ui.radioButtoneven.clicked.connect(self.radioButton_check_end)
self.ui.radioButton_his.clicked.connect(self.radioButton_his_check)
self.ui.radioButton_hisauto.clicked.connect(self.radioButton_hisauto_check)
self.ui.radioButton_hsv.clicked.connect(self.radioButton_hsv_check)
self.ui.radioButton_hisall.clicked.connect(self.radioButton_hisall_check)
self.ui.Buttoncn.clicked.connect(self.Buttoncn_check)
self.ui.Buttoncn_2.clicked.connect(self.Buttoncn_2_check)
self.ui.radioButtonopen.clicked.connect(self.radioButton_check_end)
self.ui.radioButtonclose.clicked.connect(self.radioButton_check_end)
self.ui.pc_close.clicked.connect(self.pc_close)
self.ui.pushButton_close1.clicked.connect(self.pc_close1)
self.ui.pushButton_close2.clicked.connect(self.pc_close2)
self.ui.pushButton_open1.clicked.connect(self.pc_open1)
self.ui.pushButton_open2.clicked.connect(self.pc_open2)
self.ui.radioButton_otus.clicked.connect(self.butblack)
self.ui.radioButton_200.clicked.connect(self.butblack)
self.ui.radioButton_mean.clicked.connect(self.butblack)
self.ui.pushButton_6.clicked.connect(self.submaxmark)
self.ui.pushButton_7.clicked.connect(self.addmaxmark)
self.ui.buthelp_2.clicked.connect(self.help_2)
self.ui.pushButton_jd1.clicked.connect(self.subjd)
self.ui.pushButton_jd2.clicked.connect(self.addjd)
self.ui.pushButton_lb1.clicked.connect(self.sublb)
self.ui.pushButton_lb2.clicked.connect(self.addlb)
self.ui.pushButton_jd1_2.clicked.connect(self.subjd_2)
self.ui.pushButton_jd2_2.clicked.connect(self.addjd_2)
self.ui.pushButton_lb1_2.clicked.connect(self.sublb_2)
self.ui.pushButton_lb2_2.clicked.connect(self.addlb_2)
self.ui.pushButtoncleanlearn.clicked.connect(self.mod_clean)
def radioButton_colorhis_check(self):
if self.ui.radioButton_colorhis.isChecked():
self.img_colour2_mod = 1
else:
self.img_colour2_mod = 0
self.need_save_bool = True
def mod_clean(self):
self.need_learn = True
def subjd(self):
if self.jdtime > 3:
self.jdtime -= 2
self.ui.label_jd.setText(str(self.jdtime))
self.need_save_bool = True
self.need_learn = True
def addjd(self):
if self.jdtime < 60:
self.jdtime += 2
self.ui.label_jd.setText(str(self.jdtime))
self.need_save_bool = True
self.need_learn = True
def sublb(self):
if self.lbtime > 1:
self.lbtime -= 1
self.ui.label_lb.setText(str(self.lbtime))
self.need_save_bool = True
self.need_learn = True
def addlb(self):
if self.lbtime < 20:
self.lbtime += 1
self.ui.label_lb.setText(str(self.lbtime))
self.need_save_bool = True
self.need_learn = True
def subjd_2(self):
if self.jd2time > 1:
self.jd2time -= 1
self.ui.label_jd_2.setText(str(self.jd2time))
self.need_save_bool = True
self.need_learn = True
def addjd_2(self):
if self.jd2time < 10:
self.jd2time += 1
self.ui.label_jd_2.setText(str(self.jd2time))
self.need_save_bool = True
self.need_learn = True
def sublb_2(self):
if self.lb2time > 1:
self.lb2time -= 1
self.ui.label_lb_2.setText(str(self.lb2time))
self.need_save_bool = True
self.need_learn = True
def addlb_2(self):
if self.lb2time < 20:
self.lb2time += 1
self.ui.label_lb_2.setText(str(self.lb2time))
self.need_save_bool = True
self.need_learn = True
def help_2(self):
self.ui.tabWidget.setCurrentIndex(2)
def addmaxmark(self):
if self.maxmark < 100:
self.maxmark += 1
self.ui.label_bad_4.setText(f'{self.maxmark}')
self.need_save_bool = True
def submaxmark(self):
if self.maxmark > 60:
self.maxmark -= 1
self.ui.label_bad_4.setText(f'{self.maxmark}')
self.need_save_bool = True
def butblack(self):
if self.ui.radioButton_otus.isChecked():
self.img_black_mod = 1
self.ui.widget_7.hide()
self.ui.widget_8.hide()
elif self.ui.radioButton_mean.isChecked():
self.img_black_mod = 2
self.ui.widget_7.show()
self.ui.widget_8.hide()
elif self.ui.radioButton_200.isChecked():
self.img_black_mod = 3
self.ui.widget_8.show()
self.ui.widget_7.hide()
self.need_save_bool = True
self.need_learn = True
def butdo(self):
self.do()
def butclean(self):
self.need_clean = True
def button0(self):
self.testtarget = self.testtarget + "0"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def button1(self):
self.testtarget += "1"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def button2(self):
self.testtarget += "2"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def button3(self):
self.testtarget += "3"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def button4(self):
self.testtarget += "4"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def button5(self):
self.testtarget += "5"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def button6(self):
self.testtarget += "6"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def button7(self):
self.testtarget += "7"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def button8(self):
self.testtarget += "8"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def button9(self):
self.testtarget += "9"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def buttonback(self):
self.testtarget = self.testtarget[:-1]
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def buttonc(self):
self.testtarget = ""
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def buttond(self):
self.testtarget += "."
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def buttondd(self):
self.testtarget += ":"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def buttonent(self):
self.testtarget += "\n"
self.ui.inin.setPlainText(self.testtarget)
self.need_learn = True
def buttoni(self):
self.testtarget += "-"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def buttonl(self):
self.testtarget += "/"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def buttoyear(self):
self.testtarget += "年"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def buttonmonth(self):
self.testtarget += "月"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
def buttonday(self):
self.testtarget += "日"
self.ui.inin.setPlainText(self.testtarget)
self.need_save_bool = True
self.need_learn = True
    # colour-mode radio buttons
def radioButtoncolor_check(self):
self.img_colour_mod = 4
self.ui.widget_6.show()
self.ui.widget_5.hide()
self.ui.widget_3.hide()
self.ui.widget_4.hide()
self.ui.pushButtoncleanlearn.hide()
self.radioButtonblack_check_end()
self.need_save_bool = True
def radioButtongray_check(self):
self.img_colour_mod = 1
self.ui.widget_3.show()
self.ui.widget_5.hide()
self.ui.widget_6.hide()
self.ui.pushButtoncleanlearn.hide()
self.radioButtonblack_check_end()
if self.img_gray_mod == 0:
self.ui.widget_4.hide()
else:
self.ui.widget_4.show()
self.need_save_bool = True
def radioButtonmould_check(self):
self.img_colour_mod = 3
self.ui.widget_4.show()
self.ui.widget_3.hide()
self.ui.widget_6.hide()
self.ui.widget_5.show()
self.ui.pushButtoncleanlearn.show()
self.radioButtonblack_check_end()
self.need_save_bool = True
def radioButtonblack_check(self):
self.img_colour_mod = 2
self.ui.widget_4.show()
self.ui.widget_3.hide()
self.ui.widget_6.hide()
self.ui.widget_5.show()
self.ui.pushButtoncleanlearn.hide()
self.radioButtonblack_check_end()
self.need_save_bool = True
def radioButtonblack_check_end(self):
if self.img_colour_mod != 1:
self.ui.radioButtongray.setChecked(False)
if self.img_colour_mod != 3:
self.ui.radioButtonmould.setChecked(False)
if self.img_colour_mod != 2:
self.ui.radioButtonblack.setChecked(False)
if self.img_colour_mod != 4:
self.ui.radioButtoncolor.setChecked(False)
if self.img_black_mod == 1:
self.ui.widget_7.hide()
self.ui.widget_8.hide()
elif self.img_black_mod == 2:
self.ui.widget_7.show()
self.ui.widget_8.hide()
elif self.img_black_mod == 3:
self.ui.widget_8.show()
self.ui.widget_7.hide()
    # histogram radio buttons
def radioButton_his_check(self):
self.img_gray_mod = 1
self.radioButtonhis_check_end()
def radioButton_hsv_check(self):
self.img_gray_mod = 4
self.radioButtonhis_check_end()
def radioButton_hisauto_check(self):
self.img_gray_mod = 2
self.radioButtonhis_check_end()
def radioButton_hisall_check(self):
self.img_gray_mod = 3
self.radioButtonhis_check_end()
def radioButtonhis_check_end(self):
"""
直方图数据处理
"""
self.ui.radioButton_hisauto.setChecked(False)
self.ui.radioButton_hisall.setChecked(False)
self.ui.radioButton_his.setChecked(False)
self.ui.radioButton_hsv.setChecked(False)
if self.img_gray_mod == 1:
self.ui.radioButton_his.setChecked(True)
elif self.img_gray_mod == 2:
self.ui.radioButton_hisauto.setChecked(True)
elif self.img_gray_mod == 3:
self.ui.radioButton_hisall.setChecked(True)
elif self.img_gray_mod == 4:
self.ui.radioButton_hsv.setChecked(True)
if self.img_gray_mod == 0:
self.ui.widget_4.hide()
else:
self.ui.widget_4.show()
self.need_save_bool = True
self.need_learn = True
    # filter radio buttons
def radioButton_check_end(self):
self.img_blus_mod = '0'
if self.ui.radioButtongauss.isChecked():
self.img_blus_mod += '1'
if self.ui.radioButtoneven.isChecked():
self.img_blus_mod += '2'
if self.ui.radioButtonopen.isChecked():
self.img_blus_mod += '3'
if self.ui.radioButtonclose.isChecked():
self.img_blus_mod += '4'
self.need_save_bool = True
def pc_open1(self):
if self.opentime > 0:
self.opentime -= 1
self.ui.label_open.setText(str(self.opentime))
self.need_save_bool = True
self.need_learn = True
def pc_open2(self):
if self.opentime < 5:
self.opentime += 1
self.ui.label_open.setText(str(self.opentime))
self.need_save_bool = True
self.need_learn = True
def pc_close1(self):
if self.closetime > 0:
self.closetime -= 1
self.ui.label_close.setText(str(self.closetime))
self.need_save_bool = True
self.need_learn = True
def pc_close2(self):
if self.closetime < 5:
self.closetime += 1
self.ui.label_close.setText(str(self.closetime))
self.need_save_bool = True
self.need_learn = True
def pc_close(self):
"""
关机
"""
self.scvechange()
        # untested code, kept for reference (would register the app to start on logon)
        # path = os.getcwd()
        # cmd1 = f'C:\Windows\System32\schtasks /create /tn "My App" /tr {path}\开始程序.exe /sc onlogon'
        # cmd2 = 'C:\Windows\System32\schtasks /Query /tn "My App"'
        # p = subprocess.run(cmd2, capture_output=True, shell=True, encoding="gbk")
        # if len(p.stderr) != 0:
        #     p = subprocess.run(cmd1, capture_output=True, shell=True, encoding="gbk")
        subprocess.run(r'C:\Windows\System32\shutdown -s -t 0')
sys.exit()
    # tolerance adjustment
def add(self):
if self.maxbad < 10:
self.maxbad += 1
self.ui.label_bad.setText(str(self.maxbad))
self.need_save_bool = True
def dec(self):
if self.maxbad > 0:
self.maxbad -= 1
self.ui.label_bad.setText(str(self.maxbad))
self.need_save_bool = True
def Buttoncn_check(self):
if self.usecn == 1:
self.usecn = 0
else:
self.usecn = 1
if self.usecn == 1:
self.ui.Buttoncn.setText('禁用\n中文')
else:
self.ui.Buttoncn.setText('使用\n中文')
self.need_save_bool = True
self.need_learn = True
def Buttoncn_2_check(self):
if self.usechar == 1:
self.usechar = 0
else:
self.usechar = 1
if self.usechar == 1:
self.ui.Buttoncn_2.setText('禁用\n符号')
else:
self.ui.Buttoncn_2.setText('使用\n符号')
self.need_save_bool = True
self.need_learn = True
    # switch to the main tab
    def butmain(self):
        self.ui.tabWidget.setCurrentIndex(0)
    # switch to the help tab
    def buthelp(self):
        self.ui.tabWidget.setCurrentIndex(1)
    # filtering
def blur(self, img):
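        # img_blus_mod is a string of digit codes set by the filter radio buttons:
        # '1' Gaussian blur, '2' mean blur, '3' opening (erode then dilate),
        # '4' closing (dilate then erode); erosion/dilation run opentime/closetime iterations.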
kernel = np.ones((3, 3), np.uint8)
if re.sub("[^2]", "", self.img_blus_mod) == "2":
img = cv2.blur(img, (3, 3), 0)
if re.sub("[^1]", "", self.img_blus_mod) == "1":
img = cv2.GaussianBlur(img, (3, 3), 0)
if re.sub("[^3]", "", self.img_blus_mod) == "3":
img = cv2.erode(img, kernel, iterations=self.opentime)
img = cv2.dilate(img, kernel, iterations=self.closetime)
if re.sub("[^4]", "", self.img_blus_mod) == "4":
img = cv2.dilate(img, kernel, iterations=self.closetime)
img = cv2.erode(img, kernel, iterations=self.opentime)
return img
def plcdo(self):
while 1:
if self.checking != True and self.need_check:
self.need_check = False
self.checking = True
t = time.time()
if self.need_save_bool:
self.scvechange()
if self.img_colour_mod == 3:
self.match_learn()
else:
self.ocr()
else:
if self.img_colour_mod == 3:
self.match()
else:
self.ocr()
                self.com_out_time = int((time.time() - t) * 1000)  # inspection duration in ms
self.need_freshen = True
self.need_com_freshen = True
self.checking = False
time.sleep(0.0001)
    # manual inspection trigger
def do(self):
if self.checking != True:
t = time.time()
self.need_check = False
self.checking = True
if self.need_save_bool:
self.scvechange()
if self.img_colour_mod == 3:
if self.need_learn:
self.match_learn()
else:
self.match()
else:
self.ocr()
            self.com_out_time = int((time.time() - t) * 1000)  # inspection duration in ms
self.need_freshen = True
self.need_com_freshen = True
self.checking = False
    # template learning
def match_learn(self):
if self.testtarget == '':
self.ui.radioButtonmould.setChecked(False)
self.ocr()
self.com_out_ans_bool = 4
self.com_out_bad = 0
self.save = f'{str(time.strftime("%H:%M:%S", time.localtime()))} 学习失败 未设置检测目标\n'
self.com_out_fullans = '模板匹配模式'
self.com_out_optimize = '请设置检测目标'
else:
t0 = time.time()
self.midimg = self.img_processing()
result = self.ppocr.ocr(self.midimg, det=True, rec=True, cls=False)
text, real = self.data_processing(result)
            self.com_out_mark = int(real * 100)  # confidence to report (%)
            self.save = str(time.strftime("%H:%M:%S", time.localtime()))  # history entry
            z = self.testtarget.split()  # expected strings to compare against
outplc = ''
outpc = ''
outlen = 0
znum = 0
bada = 0
badb = 0
result = {}
if len(text) == len(z):
for line in text:
outplc += f'{line[1]} '
outlen += len(line[1])
outpc += f'{line[1]}\n'
if line[1] != z[znum]:
for x in line[1]:
result[x] = result.get(x, 0) + 1
for x in z[znum]:
result[x] = result.get(x, 0) - 1
znum += 1
for x in result:
if result[x] < 0:
bada -= result[x]
if result[x] > 0:
badb += result[x]
if bada < badb: bada = badb
if bada == 0 and self.com_out_mark >= 95:
self.com_out_ans_bool = 3
self.com_out_bad = bada
self.cut_img(text)
self.save += f' 学习完成 学习时间:{(time.time() - t0) * 1000:.0f}ms 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
else:
self.com_out_ans_bool = 4
self.com_out_bad = bada
self.cut_img(text)
self.save += f' 学习失败 学习时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:{bada} 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
else:
if len(text) < len(z):
for line in z:
for x in line:
result[x] = result.get(x, 0) - 1
for line in text:
outplc += f'{line[1]} '
outlen += len(line[1])
outpc += f'{line[1]}\n'
for x in line[1]:
result[x] = result.get(x, 0) + 1
for x in result:
if result[x] < 0:
bada -= result[x]
else:
badb += result[x]
if bada < badb: bada = badb
if bada == 0 and self.com_out_mark >= 95:
self.com_out_ans_bool = 3
self.com_out_bad = bada
self.cut_img(text)
self.save += f' 学习成功 学习时间:{(time.time() - t0) * 1000:.0f}ms 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
else:
self.com_out_ans_bool = 4
self.com_out_bad = bada
self.cut_img(text)
self.save += f' 学习失败 检测时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:{bada} 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
if len(text) > len(z):
text2 = text.copy()
for line in z:
i = 0
stri = ""
numi = 0
num = 0
for line2 in text2:
s = difflib.SequenceMatcher(None, line, line2[1]).ratio()
if s >= i:
i = s
stri = line2
num = numi
numi += 1
if i == 1.0:
del text2[num]
else:
for x in line:
result[x] = result.get(x, 0) + 1
for x in stri[1]:
result[x] = result.get(x, 0) - 1
del text2[num]
for list in text2:
l = list[1]
m = 0
for list2 in text:
if l == list2[1]:
del text[m]
m += 1
mark = 0
for list in text:
outplc += f'{list[1]} '
outlen += len(list[1])
outpc += f'{list[1]}\n'
mark += list[2]
if len(text) != 0:
mark = mark / len(text)
else:
mark = 0
for x in result:
if result[x] < 0:
bada -= result[x]
if result[x] > 0:
badb += result[x]
self.com_out_mark = int(mark * 100) # 输出的置信度
if bada < badb: bada = badb
if bada == 0 and self.com_out_mark >= 85:
self.com_out_ans_bool = 3
self.com_out_bad = bada
self.cut_img(text)
self.save += f' 学习成功 学习时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:{bada} 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
else:
self.com_out_ans_bool = 4
self.com_out_bad = bada
self.cut_img(text)
self.save += f' 学习失败 学习时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:{bada} 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
boxes = [line[0] for line in text]
endimg = draw_ocr(self.midimg, boxes)
showend = QtGui.QImage(endimg.data, endimg.shape[1], endimg.shape[0],
QtGui.QImage.Format.Format_RGB888)
self.ui.label_22.setPixmap(QtGui.QPixmap.fromImage(showend))
def cut_img(self, text):
if self.com_out_ans_bool == 3:
self.com_out_optimize = '无'
self.need_learn = False
if self.img_colour_mod == 3:
self.img_mod = []
num = 0
for line in text:
img = Img.cut(self.midimg, line[0])
img = Img.Intelligent_cut(img)
self.img_mod.append(img)
for img in self.img_mod:
cv2.imwrite(f'data/mod/img/{num}.jpg', img)
num += 1
elif self.com_out_ans_bool == 4:
self.com_out_optimize = '需要调整图像设置'
    def thread_hash(self, num, line):
try:
img1 = self.img_mod[num]
img2 = Img.cut(self.midimg, line[0])
img3 = Img.Intelligent_cut(img2)
height1, width1 = img1.shape[:2]
img3 = cv2.resize(img3, (width1, height1))
hash1 = Img.aHash(img1)
hash2 = Img.aHash(img3)
real = Img.cmpHash(hash1, hash2)
self.hashmark[num] = int(real * 100)
self.hashans[num] = 0
except:
self.hashmark[num] = 0
self.hashans[num] = 0
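    # thread_hash() above resizes each detected text crop to the size of the stored
    # template, computes an average hash for both (Img.aHash), and stores their
    # similarity (Img.cmpHash, scaled to 0-100) in self.hashmark. The Img module is
    # project-local and not shown here, so the exact hash details are assumptions.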
def match(self):
"""
模板匹配
"""
t0 = time.time()
self.midimg = self.img_processing()
result = self.ppocr.ocr(self.midimg, det=True, rec=True, cls=False)
text, real = self.data_processing(result)
        self.save = str(time.strftime("%H:%M:%S", time.localtime()))  # history entry
        z = self.testtarget.split()  # expected strings to compare against
        outplc = ''
        outpc = '模板匹配模式'
        outlen = 0
        result = {}
if len(text) == len(z):
for line in text:
outplc += f'{line[1]} '
outlen += len(line[1])
else:
if len(text) < len(z):
for line in text:
outplc += f'{line[1]} '
outlen += len(line[1])
if len(text) > len(z):
text2 = text.copy()
for line in z:
i = 0
stri = ""
numi = 0
num = 0
for line2 in text2:
s = difflib.SequenceMatcher(None, line, line2[1]).ratio()
if s >= i:
i = s
stri = line2
num = numi
numi += 1
if i == 1.0:
del text2[num]
else:
for x in line:
result[x] = result.get(x, 0) + 1
for x in stri[1]:
result[x] = result.get(x, 0) - 1
del text2[num]
for list in text2:
l = list[1]
m = 0
for list2 in text:
if l == list2[1]:
del text[m]
m += 1
for list in text:
outplc += f'{list[1]} '
outlen += len(list[1])
        num = 0
        minmark = 100
        mark = 0
        for line in text:
            self.hashans[num] = 1
            # run each template comparison in its own worker thread
            hash_thread = threading.Thread(target=self.thread_hash, args=(num, line))
            hash_thread.start()
            num += 1
        while self.hashans != [0, 0, 0, 0, 0, 0, 0]:
            time.sleep(0.001)
        if num > 0:
            mark = int(sum(self.hashmark[:num]) / num)
            minmark = min(self.hashmark[0:num])
self.com_out_mark = mark
if minmark >= self.maxmark:
self.com_out_ans_bool = 1
self.com_out_optimize = '无'
self.save += f' 合格 检测时间:{(time.time() - t0) * 1000:.0f}ms 置信度:{self.com_out_mark}% 最低匹配度:{int(minmark)}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
self.com_out_optimize = '无'
if self.bad_mark > 0:
if self.bad_mark < 0.8:
self.bad_mark = 0
else:
self.bad_mark *= 0.98
else:
self.com_out_ans_bool = 2
self.com_out_optimize = '无'
self.save += f' 不良 检测时间:{(time.time() - t0) * 1000:.0f}ms 置信度:{self.com_out_mark}% 最低匹配度:{int(minmark)}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
self.bad_mark += 1
if self.badnum[4] == 1:
self.com_out_optimize = '建议重新匹配模板'
else:
self.com_out_optimize = '无'
boxes = [line[0] for line in text]
endimg = draw_ocr(self.midimg, boxes)
showend = QtGui.QImage(endimg.data, endimg.shape[1], endimg.shape[0],
QtGui.QImage.Format.Format_RGB888)
self.ui.label_22.setPixmap(QtGui.QPixmap.fromImage(showend))
def data_processing(self, result):
"""
# 数据处理
:param result:
:return: test,real
"""
# 提取过滤结果
text = [] # 结果存放列表
real = 0.0 # 置信度
for line in result:
y = []
if self.usechar == 1 and self.usecn == 1:
x = re.sub("[^-年月日/.:0-9]", "", line[1][0])
elif self.usecn == 1:
x = re.sub("[^年月日0-9]", "", line[1][0])
elif self.usechar == 1:
x = re.sub("[^-/.:0-9]", "", line[1][0])
else:
x = re.sub("[^0-9]", "", line[1][0])
if x != "":
y.append(line[0])
y.append(x)
y.append(line[1][1])
real += y[2]
text.append(y)
if real != 0:
real /= len(text)
return text, real
def img_HSV(self, img):
"""
彩图HSV自适应归一化
:param img:
:return: img
"""
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
channels = cv2.split(hsv)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
clahe.apply(channels[2], channels[2])
cv2.merge(channels, hsv)
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return img
def img_processing(self):
"""
图像处理
:return: img
"""
if self.img_colour_mod == 1: # 灰度图
if self.img_gray_mod == 1:
# img0 = self.img_HSV(self.show)
img0 = cv2.cvtColor(self.show, cv2.COLOR_RGB2GRAY)
min, max = cv2.minMaxLoc(img0)[:2]
Omin, Omax = 0, 255
a = float(Omax - Omin) / (max - min)
b = Omin - a * min
out = a * img0 + b
img1 = out.astype(np.uint8)
elif self.img_gray_mod == 2:
# img0 = self.img_HSV(self.show)
img0 = cv2.cvtColor(self.show, cv2.COLOR_RGB2GRAY)
            # contrast-limited adaptive histogram equalisation (CLAHE)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
img1 = clahe.apply(img0)
elif self.img_gray_mod == 3:
# img0 = self.img_HSV(self.show)
img0 = cv2.cvtColor(self.show, cv2.COLOR_RGB2GRAY)
img1 = cv2.equalizeHist(img0)
elif self.img_gray_mod == 0:
img0 = cv2.cvtColor(self.show, cv2.COLOR_RGB2GRAY)
img1 = img0
elif self.img_gray_mod == 4:
hsv = cv2.cvtColor(self.show, cv2.COLOR_BGR2HSV)
channels = cv2.split(hsv)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
clahe.apply(channels[2], channels[2])
cv2.merge(channels, hsv)
img1 = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1 = self.blur(img1)
img9 = cv2.cvtColor(img1, cv2.COLOR_GRAY2RGB)
        elif self.img_colour_mod == 4:  # colour mode
img9 = cv2.cvtColor(self.show, cv2.COLOR_BGR2RGB)
if self.img_colour2_mod == 1:
img9 = self.img_HSV(img9)
        else:  # black-and-white (binarised) modes
img0 = self.img_HSV(self.show)
if self.img_black_mod == 1:
img0 = cv2.cvtColor(img0, cv2.COLOR_RGB2GRAY)
ret, img1 = cv2.threshold(img0, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif self.img_black_mod == 2:
img0 = cv2.cvtColor(img0, cv2.COLOR_RGB2GRAY)
img1 = cv2.adaptiveThreshold(img0, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, self.jdtime,
self.lbtime)
elif self.img_black_mod == 3:
img1 = cv2.cvtColor(img0, cv2.COLOR_HSV2RGB)
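                # Colour-extraction mask: keep pixels whose three channels differ by less
                # than jd2time*10 (near-grey ink) and are all darker than lb2time*10,
                # then invert so the dark print becomes black on a white background.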
img2 = ((img1[:, :, 0] + self.jd2time * 10 >= img1[:, :, 1])
* (img1[:, :, 0] + self.jd2time * 10 >= img1[:, :, 2])
* (img1[:, :, 2] + self.jd2time * 10 >= img1[:, :, 1])
* (img1[:, :, 2] + self.jd2time * 10 >= img1[:, :, 0])
* (img1[:, :, 1] + self.jd2time * 10 >= img1[:, :, 2])
* (img1[:, :, 1] + self.jd2time * 10 >= img1[:, :, 0])
* (img1[:, :, 0] < self.lb2time * 10)
* (img1[:, :, 1] < self.lb2time * 10)
* (img1[:, :, 2] < self.lb2time * 10))
img2 = np.invert(img2)
img2.dtype = 'uint8'
img1 = img2 * 255
img1 = self.blur(img1)
img9 = cv2.cvtColor(img1, cv2.COLOR_GRAY2RGB)
return img9
def ocr(self):
t0 = time.time()
        # run OCR
        self.midimg = self.img_processing()
        # self.midimg = self.show
        result = self.ppocr.ocr(self.midimg, det=True, rec=True, cls=False)
        text, real = self.data_processing(result)
        self.com_out_mark = int(real * 100)  # confidence to report (%)
        self.save = str(time.strftime("%H:%M:%S", time.localtime()))  # history entry
        # self.com_out_time = int((time.time() - t0) * 1000)  # inspection duration
        z = self.testtarget.split()  # expected strings to compare against
        # compare the result against the target and build the outputs
if self.testtarget == '':
self.com_out_bad = 0
if self.com_out_mark >= self.maxmark:
self.com_out_ans_bool = 1
self.save += f' 合格 检测时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:0 置信度:{self.com_out_mark}%\n'
self.com_out_optimize = '未设置检测标准'
else:
self.com_out_ans_bool = 2
self.save += f' 不良 检测时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:0 置信度:{self.com_out_mark}%\n'
self.com_out_optimize = '置信度异常'
self.com_out_fullans = ''
self.com_out_ans = ''
self.com_out_ans_len = 0
for line in text:
self.com_out_fullans += f'{line[1]}\n'
self.com_out_ans += f'{line[1]} '
self.com_out_ans_len += len(line[1])
if self.bad_mark > 0:
if self.bad_mark < 0.8:
self.bad_mark = 0
else:
self.bad_mark *= 0.98
else:
outplc = ''
outpc = ''
outlen = 0
znum = 0
bada = 0
badb = 0
result = {}
if len(text) == len(z):
for line in text:
outplc += f'{line[1]} '
outlen += len(line[1])
outpc += f'{line[1]}\n'
if line[1] != z[znum]:
for x in line[1]:
result[x] = result.get(x, 0) + 1
for x in z[znum]:
result[x] = result.get(x, 0) - 1
znum += 1
for x in result:
if result[x] < 0:
bada -= result[x]
if result[x] > 0:
badb += result[x]
if bada < badb: bada = badb
if self.maxbad >= bada and self.com_out_mark >= self.maxmark:
self.com_out_ans_bool = 1
self.com_out_bad = bada
self.save += f' 合格 检测时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:{bada} 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
if self.bad_mark > 0:
if self.bad_mark < 0.8:
self.bad_mark = 0
else:
self.bad_mark *= 0.98
else:
self.com_out_ans_bool = 2
self.com_out_bad = bada
self.save += f' 不良 检测时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:{bada} 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
self.bad_mark += 1
else:
if len(text) < len(z):
for line in z:
for x in line:
result[x] = result.get(x, 0) - 1
for line in text:
outplc += f'{line[1]} '
outlen += len(line[1])
outpc += f'{line[1]}\n'
for x in line[1]:
result[x] = result.get(x, 0) + 1
for x in result:
if result[x] < 0:
bada -= result[x]
else:
badb += result[x]
if bada < badb: bada = badb
if self.maxbad >= bada and self.com_out_mark >= self.maxmark:
self.com_out_ans_bool = 1
self.com_out_bad = bada
self.save += f' 合格 检测时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:{bada} 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
if self.bad_mark > 0:
if self.bad_mark < 0.8:
self.bad_mark = 0
else:
self.bad_mark *= 0.98
else:
self.com_out_ans_bool = 2
self.com_out_bad = bada
self.save += f' 不良 检测时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:{bada} 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
self.bad_mark += 1
if len(text) > len(z):
text2 = text.copy()
for line in z:
i = 0
stri = ""
numi = 0
num = 0
for line2 in text2:
s = difflib.SequenceMatcher(None, line, line2[1]).ratio()
if s >= i:
i = s
stri = line2
num = numi
numi += 1
if i == 1.0:
del text2[num]
else:
for x in line:
result[x] = result.get(x, 0) + 1
for x in stri[1]:
result[x] = result.get(x, 0) - 1
del text2[num]
for list in text2:
l = list[1]
m = 0
for list2 in text:
if l == list2[1]:
del text[m]
m += 1
mark = 0
for list in text:
outplc += f'{list[1]} '
outlen += len(list[1])
outpc += f'{list[1]}\n'
mark += list[2]
mark = mark / len(text)
for x in result:
if result[x] < 0:
bada -= result[x]
if result[x] > 0:
badb += result[x]
self.com_out_mark = int(mark * 100) # 输出的置信度
if bada < badb: bada = badb
if self.maxbad >= bada and self.com_out_mark >= self.maxmark:
self.com_out_ans_bool = 1
self.com_out_bad = bada
self.save += f' 合格 检测时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:{bada} 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
if self.bad_mark > 0:
if self.bad_mark < 0.8:
self.bad_mark = 0
else:
self.bad_mark *= 0.98
else:
self.com_out_ans_bool = 2
self.com_out_bad = bada
self.save += f' 不良 检测时间:{(time.time() - t0) * 1000:.0f}ms 不良字符:{bada} 置信度:{self.com_out_mark}%\n'
self.com_out_fullans = outpc
self.com_out_ans = outplc
self.com_out_ans_len = outlen
self.bad_mark += 1
if self.badnum[4] == 1:
if self.com_out_ans_bool == 2:
if self.com_out_bad == 0:
self.com_out_optimize = '检查打码设备'
else:
self.com_out_optimize = '检查打码或设置'
else:
if self.com_out_ans_bool == 1:
if self.com_out_bad > 0:
self.com_out_optimize = '建议减少特殊符号'
else:
self.com_out_optimize = '无'
else:
if self.com_out_bad == 0 and self.com_out_mark >= self.maxmark:
self.com_out_optimize = '无'
else:
self.com_out_optimize = '建议检查打码/设置'
boxes = [line[0] for line in text]
endimg = draw_ocr(self.midimg, boxes)
showend = QtGui.QImage(endimg.data, endimg.shape[1], endimg.shape[0],
QtGui.QImage.Format.Format_RGB888)
self.ui.label_22.setPixmap(QtGui.QPixmap.fromImage(showend))
    # results tab
    def tab_1(self):
        if self.need_save_bool:
            self.scvechange()
        self.ui.tabWidget_2.setCurrentIndex(0)
    # inspection-settings tab
    def tab_2(self):
        self.ui.tabWidget_2.setCurrentIndex(1)
def freshen_interface(self):
"""
输出当前检测结果
"""
self.ui.label_ans_time.setText(str(self.com_out_time) + 'ms')
if self.com_out_time > 60:
self.ui.label_ans_time.setStyleSheet('color: rgb(255, 100, 0);')
else:
self.ui.label_ans_time.setStyleSheet('')
self.ui.label_ans_optimize.setText(self.com_out_optimize)
if self.com_out_optimize == '无':
self.ui.label_ans_optimize.setStyleSheet('')
elif re.sub("[^建议]", "", self.com_out_optimize) == '建议':
self.ui.label_ans_optimize.setStyleSheet('color: rgb(255, 100, 0);')
else:
self.ui.label_ans_optimize.setStyleSheet('color: rgb(255, 0, 0);')
if self.com_out_ans_bool == 1:
self.ui.label_ans_2.setText('合格')
self.ui.label_ans.setText('合格')
self.ui.label_ans.setStyleSheet('color: rgb(0, 200, 0);')
self.ui.label_ans_2.setStyleSheet('')
self.ui.label_ans_bad_num.setText(str(self.com_out_bad))
if self.com_out_bad == 0:
self.ui.label_ans_bad_num.setStyleSheet('')
else:
self.ui.label_ans_bad_num.setStyleSheet('color: rgb(255, 100, 0);')
elif self.com_out_ans_bool == 2:
self.ui.label_ans_2.setText('不良')
self.ui.label_ans.setText('不良')
self.ui.label_ans.setStyleSheet('color: rgb(255, 0, 0);')
self.ui.label_ans_2.setStyleSheet('color: rgb(255, 0, 0);')
self.ui.label_ans_bad_num.setText(str(self.com_out_bad))
if self.com_out_bad == 0:
self.ui.label_ans_bad_num.setStyleSheet('')
else:
self.ui.label_ans_bad_num.setStyleSheet('color: rgb(255, 0, 0);')
elif self.com_out_ans_bool == 3:
self.ui.label_ans_2.setText('学习成功')
self.ui.label_ans.setText('学习成功')
self.ui.label_ans.setStyleSheet('color: rgb(0, 200, 0);')
self.ui.label_ans_2.setStyleSheet('')
self.ui.label_ans_bad_num.setText(str(self.com_out_bad))
if self.com_out_bad == 0:
self.ui.label_ans_bad_num.setStyleSheet('')
else:
self.ui.label_ans_bad_num.setStyleSheet('color: rgb(255, 100, 0);')
elif self.com_out_ans_bool == 4:
self.ui.label_ans_2.setText('学习失败')
self.ui.label_ans.setText('学习失败')
self.ui.label_ans.setStyleSheet('color: rgb(255, 0, 0);')
self.ui.label_ans_2.setStyleSheet('color: rgb(255, 0, 0);')
self.ui.label_ans_bad_num.setText(str(self.com_out_bad))
if self.com_out_bad == 0:
self.ui.label_ans_bad_num.setStyleSheet('')
else:
self.ui.label_ans_bad_num.setStyleSheet('color: rgb(255, 0, 0);')
self.ui.label_ans_reliability.setText(str(self.com_out_mark) + '%')
if self.com_out_mark < 80:
self.ui.label_ans_reliability.setStyleSheet('color: rgb(255, 100, 0);')
else:
self.ui.label_ans_reliability.setStyleSheet('')
self.ui.label_ans_str.setText(self.com_out_fullans)
if len(self.historylist) < 15:
self.historylist.append(self.save)
else:
del (self.historylist[0])
self.historylist.append(self.save)
s = ""
for i in self.historylist:
s += f'{i}'
self.ui.label_ans_his.setPlainText(s)
def scvechange(self):
"""
保存设置
"""
if self.need_learn:
bool_learn = 1
else:
bool_learn = 0
f = open('data/data', 'w')
a = f'{self.img_colour_mod}\n{self.img_gray_mod}\n{self.img_black_mod}\n{self.img_colour2_mod}\n{self.img_blus_mod}\n{self.maxbad}' \
f'\n{self.maxmark}\n{self.opentime}\n{self.closetime}\n{self.jdtime}\n{self.lbtime}\n{self.jd2time}\n{self.lb2time}\n{self.usecn}' \
f'\n{self.usechar}\n{bool_learn}'
f.write(a)
l = open('data/data2', 'w')
a = self.testtarget
l.write(a)
self.need_save_bool = False
def startset(self):
"""
初始化设置参数与激活
"""
f = open('data/data', 'r')
lines = f.readlines() # 读取全部内容 ,并以列表方式返回
self.img_colour_mod = int(lines[0].strip('\n'))
self.img_gray_mod = int(lines[1].strip('\n'))
self.img_black_mod = int(lines[2].strip('\n'))
self.img_colour2_mod = int(lines[3].strip('\n'))
self.img_blus_mod = lines[4].strip('\n')
self.maxbad = int(lines[5].strip('\n'))
self.maxmark = int(lines[6].strip('\n'))
self.opentime = int(lines[7].strip('\n'))
self.ui.label_open.setText(str(self.opentime))
self.closetime = int(lines[8].strip('\n'))
self.ui.label_close.setText(str(self.closetime))
self.jdtime = int(lines[9].strip('\n'))
self.ui.label_jd.setText(str(self.jdtime))
self.lbtime = int(lines[10].strip('\n'))
self.ui.label_lb.setText(str(self.lbtime))
self.jd2time = int(lines[11].strip('\n'))
self.ui.label_jd_2.setText(str(self.jd2time))
self.lb2time = int(lines[12].strip('\n'))
self.ui.label_lb_2.setText(str(self.lb2time))
self.usecn = int(lines[13].strip('\n'))
if self.usecn == 1:
self.ui.Buttoncn.setText('禁用\n中文')
else:
self.ui.Buttoncn.setText('使用\n中文')
self.usechar = int(lines[14].strip('\n'))
if self.usechar == 1:
self.ui.Buttoncn_2.setText('禁用\n符号')
else:
self.ui.Buttoncn_2.setText('使用\n符号')
bool_learn = int(lines[15].strip('\n'))
if bool_learn == 1:
self.need_learn = True
if self.img_colour_mod == 1:
self.ui.radioButtongray.setChecked(True)
elif self.img_colour_mod == 2:
self.ui.radioButtonblack.setChecked(True)
elif self.img_colour_mod == 3:
self.ui.radioButtonmould.setChecked(True)
elif self.img_colour_mod == 4:
self.ui.radioButtoncolor.setChecked(True)
if self.img_gray_mod == 1:
self.ui.radioButton_his.setChecked(True)
elif self.img_gray_mod == 2:
self.ui.radioButton_hisauto.setChecked(True)
elif self.img_gray_mod == 3:
self.ui.radioButton_hisall.setChecked(True)
elif self.img_gray_mod == 4:
self.ui.radioButton_hsv.setChecked(True)
if self.img_black_mod == 1:
self.ui.radioButton_otus.setChecked(True)
self.ui.widget_7.hide()
self.ui.widget_8.hide()
elif self.img_black_mod == 2:
self.ui.radioButton_mean.setChecked(True)
self.ui.widget_7.show()
self.ui.widget_8.hide()
elif self.img_black_mod == 3:
self.ui.radioButton_200.setChecked(True)
self.ui.widget_8.show()
self.ui.widget_7.hide()
if re.sub("[^1]", "", self.img_blus_mod) != "":
self.ui.radioButtongauss.setChecked(True)
if re.sub("[^2]", "", self.img_blus_mod) != "":
self.ui.radioButtoneven.setChecked(True)
if re.sub("[^3]", "", self.img_blus_mod) != "":
self.ui.radioButtonopen.setChecked(True)
if re.sub("[^4]", "", self.img_blus_mod) != "":
self.ui.radioButtonclose.setChecked(True)
if self.img_colour_mod == 3:
self.ui.pushButtoncleanlearn.show()
else:
self.ui.pushButtoncleanlearn.hide()
if self.img_colour_mod == 1:
self.ui.widget_3.show()
self.ui.widget_5.hide()
self.ui.widget_6.hide()
if self.img_gray_mod == 0:
self.ui.widget_4.hide()
else:
self.ui.widget_4.show()
elif self.img_colour_mod == 4:
self.ui.widget_6.show()
self.ui.widget_5.hide()
self.ui.widget_3.hide()
else:
self.ui.widget_4.show()
self.ui.widget_5.show()
self.ui.widget_6.hide()
self.ui.widget_3.hide()
if self.img_colour2_mod == 1:
self.ui.radioButton_colorhis.setChecked(True)
self.ui.label_bad_4.setText(str(self.maxmark))
self.ui.label_bad.setText(str(self.maxbad))
        with open('data/data2', encoding="gb2312", mode='r') as l:
            self.testtarget = l.read()
self.ui.inin.setPlainText(self.testtarget)
for num in range(len(self.testtarget.split())):
i = cv2.imread(f'data/mod/img/{num}.jpg')
self.img_mod.append(i)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.showFullScreen()
app.exec()
|
threading_simpleargs.py
|
#!/usr/bin/env python
# encoding: UTF-8
import threading
def worker(num):
    print('Worker: %s' % num)
return
threads = []
for i in range(5):
    t = threading.Thread(target=worker, args=(i,))
    threads.append(t)
t.start()
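# The snippet above starts the workers but never waits for them; a typical
# completion step (added here as an illustrative extra) would be:
for t in threads:
    t.join()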
|
Missile.py
|
import time
from Mechanics import *
from Zone import Zone
from Projectile import Projectile
class Missile(Projectile):
    # how often the aim-updating (target re-lock) step runs
def __init__(self, img, x, y,
distance=0,
rotation_speed=0.0,
max_speed=0,
damage=0,
acceleration=0,
hit_range=0,
expl_params=None,):
super().__init__(img, x, y,
distance=distance,
max_speed=max_speed,
damage=damage,
)
self.compute_tempo = 5
self.compute_count = 0
self.d_ang = rotation_speed
self.d_speed = acceleration
self.max_speed = max_speed
self.hit_range = hit_range
self.expl_params = expl_params
self.hp = damage
self.mod_speed = 0
self.dist_prev = 500
self.dist = None
State.missiles.add(self)
State.projectiles.remove(self)
self.aim = self.lock_closest()
self.expAnimation = None
def rotate_to_aim(self):
aim_dir = self.get_aim_dir(self.aim)
x = (self.look_dir - aim_dir)
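        # Turn the short way around: when the raw angle difference exceeds 180 degrees,
        # rotating against its sign would take the long way, so the direction is flipped.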
if abs(x) > 180:
self.rotate(self.d_ang*np.sign(x))
else:
self.rotate(-self.d_ang*np.sign(x))
def lock_closest(self):
arr = []
for x in State.asteroids:
arr.append(self.get_distance(x))
if len(arr) > 0:
return State.asteroids.sprites()[arr.index(min(arr))]
else:
return None
def pursue(self):
r = copy.copy(self.rect)
# create engine particles
FX_Track(particle, r, 40, look_dir=random.randint(0,358),
fading=(20,16), enlarging=(20,16),
color=(200,200,200,random.randint(40,130)),
speed=[random.uniform(-0.5,0.5), random.uniform(-0.5,0.5)])
brightness = max(0.0, random.gauss(0.5, 0.2))
FX_Glow(r, 1, int(20 * brightness), int(20 * brightness), (255, 200, 125, int(brightness * 10)))
self.rotate_to_aim()
self.mod_speed += self.d_speed
# If missile is close enough to aim but fails to hit it (starts to get
# further from aim), missile will detonate.
self.dist = self.get_distance(self.aim)
if self.dist > self.dist_prev and self.dist < self.hit_range:
self.blow_up()
return
self.dist_prev = self.dist
a1 = self.speed[0] + self.d_speed*np.cos(np.deg2rad(self.look_dir-90))
if a1 >= self.max_speed or a1 <= -self.max_speed:
a1 = self.max_speed*np.cos(np.deg2rad(self.look_dir-90))
a2 = self.speed[1] + self.d_speed*np.sin(np.deg2rad(self.look_dir-90))
if a2 >= self.max_speed or a2 <= -self.max_speed:
a2 = self.max_speed*np.sin(np.deg2rad(self.look_dir-90))
self.speed = (a1, a2)
def update(self):
if self.aim in State.asteroids:
self.pursue()
else:
self.aim = self.lock_closest()
self.compute_count += 1
if self.compute_count > self.compute_tempo:
self.compute_count = 0
self.aim = self.lock_closest()
def blow_up(self):
x = Zone(self.rect.x, self.rect.y, self.hit_range, self.hp, 2)
prm_hash = dict_hash(self.expl_params)
if self.expAnimation:
explAnimation = self.expAnimation
elif prm_hash in State.buff_explosions:
explAnimation = random.choice(State.buff_explosions[prm_hash])
else:
while not prm_hash in State.buff_explosions:
time.sleep(0.1)
explAnimation = random.choice(State.buff_explosions[prm_hash])
Animation.FX_explosion(self.rect.centerx, self.rect.centery,
xpl=explAnimation, radius=(self.hit_range*3,self.hit_range*3), randdir=False)
State.hit_waves.add(x)
State.time_dependent.add(x)
self.kill()
@staticmethod
def shot(self, direction, missile):
def _delayed_action():
skipped_len = self.rect.height // 2
for _ in range(State.missile_types[missile]['volley']):
                # don't shoot if the launcher is dead
if self.hp <= 0:
return
shot = Missile(State.missile_types[missile]['image'],
self.rect.centerx,
self.rect.centery,
damage=State.missile_types[missile]['damage'],
distance=State.missile_types[missile]['distance'],
max_speed=State.missile_types[missile]['speed'],
acceleration=State.missile_types[missile]['acceleration'],
rotation_speed=State.missile_types[missile]['rotation_speed'],
hit_range=State.missile_types[missile]['hit_range'],
expl_params=State.missile_types[missile]['expl_params'],
)
if direction:
shot.look_dir = direction
else:
shot.look_dir = self.look_dir
shot.rect.centerx = (self.rect.centerx
- skipped_len * np.cos(np.deg2rad(shot.look_dir
+ 90)))
shot.rect.centery = (self.rect.centery
- skipped_len * np.sin(np.deg2rad(shot.look_dir
+ 90)))
shot.speed = [State.missile_types[missile]['speed']
* np.cos(np.deg2rad(self.look_dir - 90)),
State.missile_types[missile]['speed']
* np.sin(np.deg2rad(self.look_dir - 90))]
shot.rotate(0)
# delay between shots
time.sleep(0.2)
# fire missiles in thread so they can be fired with a delay
threading.Thread(target=_delayed_action).start()
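# Illustrative sketch (not part of the original module): the turn rule that
# rotate_to_aim() applies each frame, pulled out as a standalone function so
# the wrap-around behaviour is easy to check. Angles are in degrees and
# `turn_step` is a name introduced here for illustration only.
import numpy as np
def turn_step(look_dir, aim_dir, d_ang):
    # Signed difference between current heading and bearing to the target.
    diff = look_dir - aim_dir
    # Past 180 degrees the short way around the circle is the other direction,
    # so the sign of the applied rotation flips.
    if abs(diff) > 180:
        return d_ang * np.sign(diff)
    return -d_ang * np.sign(diff)
# turn_step(10, 40, 2.0) -> 2.0 and turn_step(10, 350, 2.0) -> -2.0: mirrored
# situations turn in opposite directions instead of going the long way around.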
|
utils.py
|
# Copyright (c) 2013 Ondrej Kipila <ok100 at openmailbox dot org>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING file for more details.
"""Common functions used across the whole package."""
import socket
import subprocess as sp
from threading import Thread
from psutil import process_iter
def check_output(command):
"""Return an output of the given command."""
try:
return sp.check_output(command, shell=True, stderr=sp.DEVNULL).decode()
except sp.CalledProcessError:
return ''
def process_fifo(file, command):
"""Send a command to the given fifo.
Keyword arguments:
file -- the path to the fifo file
command -- the command without newline character at the end
"""
with open(file, 'w') as f:
f.write(command + '\n')
def process_socket(sock, command):
"""Send a command to the given socket.
Keyword arguments:
    sock -- the path to the socket
command -- the command without newline character at the end
"""
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.connect(sock)
s.send((command + '\n').encode())
def running(process_name):
"""Return True if the given process is running, otherwise return False.
Keyword arguments:
process_name -- the name of the process
"""
for p in process_iter():
        if p.name() == process_name:  # psutil >= 2.0 exposes name() as a method
return True
return False
def thread(target, args=()):
"""Run the given callable object in a new daemon thread.
Keyword arguments:
target -- the target object
args -- a tuple of arguments to be passed to the target object
"""
worker = Thread(target=target, args=args)
worker.daemon = True
worker.start()
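# Illustrative usage (not part of the original module). The process name 'mpd'
# and the fifo path '/tmp/mpd.fifo' are made-up examples, not values the
# package defines.
if __name__ == '__main__':
    # Run a callable in the background; the thread is a daemon, so it will not
    # keep the script alive on its own.
    thread(print, ('checking for a running player...',))
    if running('mpd'):
        # Send a command to a (hypothetical) player control fifo.
        process_fifo('/tmp/mpd.fifo', 'next')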
|
presubmit_support.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
from __future__ import print_function
__version__ = '2.0.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import argparse
import ast # Exposed through the API.
import contextlib
import cpplint
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import multiprocessing
import os # Somewhat exposed through the API.
import random
import re # Exposed through the API.
import signal
import six
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback
import unittest # Exposed through the API.
from warnings import warn
# Local imports.
import fix_encoding
import gclient_paths # Exposed through the API
import gclient_utils
import git_footers
import gerrit_util
import owners as owners_db
import owners_client
import owners_finder
import presubmit_canned_checks
import rdb_wrapper
import scm
import subprocess2 as subprocess # Exposed through the API.
if sys.version_info.major == 2:
# TODO(1009814): Expose urllib2 only through urllib_request and urllib_error
import urllib2 # Exposed through the API.
import urlparse
import urllib2 as urllib_request
import urllib2 as urllib_error
else:
import urllib.parse as urlparse
import urllib.request as urllib_request
import urllib.error as urllib_error
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
def time_time():
# Use this so that it can be mocked in tests without interfering with python
# system machinery.
return time.time()
class PresubmitFailure(Exception):
pass
class CommandData(object):
def __init__(self, name, cmd, kwargs, message, python3=False):
self.name = name
self.cmd = cmd
self.stdin = kwargs.get('stdin', None)
self.kwargs = kwargs.copy()
self.kwargs['stdout'] = subprocess.PIPE
self.kwargs['stderr'] = subprocess.STDOUT
self.kwargs['stdin'] = subprocess.PIPE
self.message = message
self.info = None
self.python3 = python3
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate().
class SigintHandler(object):
sigint_returncodes = {-signal.SIGINT, # Unix
-1073741510, # Windows
}
def __init__(self):
self.__lock = threading.Lock()
self.__processes = set()
self.__got_sigint = False
self.__previous_signal = signal.signal(signal.SIGINT, self.interrupt)
def __on_sigint(self):
self.__got_sigint = True
while self.__processes:
try:
self.__processes.pop().terminate()
except OSError:
pass
def interrupt(self, signal_num, frame):
with self.__lock:
self.__on_sigint()
self.__previous_signal(signal_num, frame)
def got_sigint(self):
with self.__lock:
return self.__got_sigint
def wait(self, p, stdin):
with self.__lock:
if self.__got_sigint:
p.terminate()
self.__processes.add(p)
stdout, stderr = p.communicate(stdin)
code = p.returncode
with self.__lock:
self.__processes.discard(p)
if code in self.sigint_returncodes:
self.__on_sigint()
return stdout, stderr
sigint_handler = SigintHandler()
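# Module-level singleton: subprocesses launched below (see
# ThreadPool._RunWithTimeout) are waited on via sigint_handler.wait(p, stdin),
# so a Ctrl+C in the main process or in any child terminates every process
# still registered with the handler.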
class Timer(object):
def __init__(self, timeout, fn):
self.completed = False
self._fn = fn
self._timer = threading.Timer(timeout, self._onTimer) if timeout else None
def __enter__(self):
if self._timer:
self._timer.start()
return self
def __exit__(self, _type, _value, _traceback):
if self._timer:
self._timer.cancel()
def _onTimer(self):
self._fn()
self.completed = True
class ThreadPool(object):
def __init__(self, pool_size=None, timeout=None):
self.timeout = timeout
self._pool_size = pool_size or multiprocessing.cpu_count()
self._messages = []
self._messages_lock = threading.Lock()
self._tests = []
self._tests_lock = threading.Lock()
self._nonparallel_tests = []
def _GetCommand(self, test):
vpython = 'vpython'
if test.python3:
vpython += '3'
if sys.platform == 'win32':
vpython += '.bat'
cmd = test.cmd
if cmd[0] == 'python':
cmd = list(cmd)
cmd[0] = vpython
elif cmd[0].endswith('.py'):
cmd = [vpython] + cmd
# On Windows, scripts on the current directory take precedence over PATH, so
# that when testing depot_tools on Windows, calling `vpython.bat` will
# execute the copy of vpython of the depot_tools under test instead of the
# one in the bot.
# As a workaround, we run the tests from the parent directory instead.
if (cmd[0] == vpython and
'cwd' in test.kwargs and
os.path.basename(test.kwargs['cwd']) == 'depot_tools'):
test.kwargs['cwd'] = os.path.dirname(test.kwargs['cwd'])
cmd[1] = os.path.join('depot_tools', cmd[1])
return cmd
def _RunWithTimeout(self, cmd, stdin, kwargs):
p = subprocess.Popen(cmd, **kwargs)
with Timer(self.timeout, p.terminate) as timer:
stdout, _ = sigint_handler.wait(p, stdin)
      stdout = stdout.decode('utf-8', 'ignore')
      if timer.completed:
        stdout = 'Process timed out after %ss\n%s' % (self.timeout, stdout)
      return p.returncode, stdout
def CallCommand(self, test):
"""Runs an external program.
This function converts invocation of .py files and invocations of 'python'
to vpython invocations.
"""
cmd = self._GetCommand(test)
try:
start = time_time()
returncode, stdout = self._RunWithTimeout(cmd, test.stdin, test.kwargs)
duration = time_time() - start
except Exception:
duration = time_time() - start
return test.message(
'%s\n%s exec failure (%4.2fs)\n%s' % (
test.name, ' '.join(cmd), duration, traceback.format_exc()))
if returncode != 0:
return test.message(
'%s\n%s (%4.2fs) failed\n%s' % (
test.name, ' '.join(cmd), duration, stdout))
if test.info:
return test.info('%s\n%s (%4.2fs)' % (test.name, ' '.join(cmd), duration))
def AddTests(self, tests, parallel=True):
if parallel:
self._tests.extend(tests)
else:
self._nonparallel_tests.extend(tests)
def RunAsync(self):
self._messages = []
def _WorkerFn():
while True:
test = None
with self._tests_lock:
if not self._tests:
break
test = self._tests.pop()
result = self.CallCommand(test)
if result:
with self._messages_lock:
self._messages.append(result)
def _StartDaemon():
t = threading.Thread(target=_WorkerFn)
t.daemon = True
t.start()
return t
while self._nonparallel_tests:
test = self._nonparallel_tests.pop()
result = self.CallCommand(test)
if result:
self._messages.append(result)
if self._tests:
threads = [_StartDaemon() for _ in range(self._pool_size)]
for worker in threads:
worker.join()
return self._messages
def normpath(path):
'''Version of os.path.normpath that also changes backward slashes to
forward slashes when not running on Windows.
'''
# This is safe to always do because the Windows version of os.path.normpath
# will replace forward slashes with backward slashes.
path = path.replace(os.sep, '/')
return os.path.normpath(path)
def _RightHandSideLinesImpl(affected_files):
"""Implements RightHandSideLines for InputApi and GclChange."""
for af in affected_files:
lines = af.ChangedContents()
for line in lines:
yield (af, line[0], line[1])
def prompt_should_continue(prompt_string):
sys.stdout.write(prompt_string)
sys.stdout.flush()
response = sys.stdin.readline().strip().lower()
return response in ('y', 'yes')
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
"""Base class for result objects."""
fatal = False
should_prompt = False
def __init__(self, message, items=None, long_text=''):
"""
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
self._message = message
self._items = items or []
self._long_text = long_text.rstrip()
def handle(self):
sys.stdout.write(self._message)
sys.stdout.write('\n')
for index, item in enumerate(self._items):
sys.stdout.write(' ')
# Write separately in case it's unicode.
sys.stdout.write(str(item))
if index < len(self._items) - 1:
sys.stdout.write(' \\')
sys.stdout.write('\n')
if self._long_text:
sys.stdout.write('\n***************\n')
# Write separately in case it's unicode.
sys.stdout.write(self._long_text)
sys.stdout.write('\n***************\n')
def json_format(self):
return {
'message': self._message,
'items': [str(item) for item in self._items],
'long_text': self._long_text,
'fatal': self.fatal
}
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
"""A hard presubmit error."""
fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
"""An warning that prompts the user if they want to continue."""
should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
"""Just print something to the screen -- but it's not even a warning."""
pass
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
"""A warning that should be included in the review request email."""
def __init__(self, *args, **kwargs):
super(_MailTextResult, self).__init__()
raise NotImplementedError()
class GerritAccessor(object):
"""Limited Gerrit functionality for canned presubmit checks to work.
To avoid excessive Gerrit calls, caches the results.
"""
def __init__(self, url=None, project=None, branch=None):
self.host = urlparse.urlparse(url).netloc if url else None
self.project = project
self.branch = branch
self.cache = {}
self.code_owners_enabled = None
def _FetchChangeDetail(self, issue):
# Separate function to be easily mocked in tests.
try:
return gerrit_util.GetChangeDetail(
self.host, str(issue),
['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise Exception('Either Gerrit issue %s doesn\'t exist, or '
'no credentials to fetch issue details' % issue)
raise
def GetChangeInfo(self, issue):
"""Returns labels and all revisions (patchsets) for this issue.
The result is a dictionary according to Gerrit REST Api.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
    However, the API isn't very clear about what's inside, so see the tests for examples.
"""
assert issue
cache_key = int(issue)
if cache_key not in self.cache:
self.cache[cache_key] = self._FetchChangeDetail(issue)
return self.cache[cache_key]
def GetChangeDescription(self, issue, patchset=None):
"""If patchset is none, fetches current patchset."""
info = self.GetChangeInfo(issue)
    # info is a reference to the cache. We'll modify it here, adding the
    # description for the right patchset if it is not yet there.
# Find revision info for the patchset we want.
if patchset is not None:
for rev, rev_info in info['revisions'].items():
if str(rev_info['_number']) == str(patchset):
break
else:
raise Exception('patchset %s doesn\'t exist in issue %s' % (
patchset, issue))
else:
rev = info['current_revision']
rev_info = info['revisions'][rev]
return rev_info['commit']['message']
def GetDestRef(self, issue):
ref = self.GetChangeInfo(issue)['branch']
if not ref.startswith('refs/'):
# NOTE: it is possible to create 'refs/x' branch,
# aka 'refs/heads/refs/x'. However, this is ill-advised.
ref = 'refs/heads/%s' % ref
return ref
def _GetApproversForLabel(self, issue, label):
change_info = self.GetChangeInfo(issue)
label_info = change_info.get('labels', {}).get(label, {})
values = label_info.get('values', {}).keys()
if not values:
return []
max_value = max(int(v) for v in values)
return [v for v in label_info.get('all', [])
if v.get('value', 0) == max_value]
def IsBotCommitApproved(self, issue):
return bool(self._GetApproversForLabel(issue, 'Bot-Commit'))
def IsOwnersOverrideApproved(self, issue):
return bool(self._GetApproversForLabel(issue, 'Owners-Override'))
def GetChangeOwner(self, issue):
return self.GetChangeInfo(issue)['owner']['email']
def GetChangeReviewers(self, issue, approving_only=True):
changeinfo = self.GetChangeInfo(issue)
if approving_only:
reviewers = self._GetApproversForLabel(issue, 'Code-Review')
else:
reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
return [r.get('email') for r in reviewers]
def UpdateDescription(self, description, issue):
gerrit_util.SetCommitMessage(self.host, issue, description, notify='NONE')
def IsCodeOwnersEnabledOnRepo(self):
if self.code_owners_enabled is None:
self.code_owners_enabled = gerrit_util.IsCodeOwnersEnabledOnRepo(
self.host, self.project)
return self.code_owners_enabled
class OutputApi(object):
"""An instance of OutputApi gets passed to presubmit scripts so that they
can output various types of results.
"""
PresubmitResult = _PresubmitResult
PresubmitError = _PresubmitError
PresubmitPromptWarning = _PresubmitPromptWarning
PresubmitNotifyResult = _PresubmitNotifyResult
MailTextResult = _MailTextResult
def __init__(self, is_committing):
self.is_committing = is_committing
self.more_cc = []
def AppendCC(self, cc):
"""Appends a user to cc for this change."""
self.more_cc.append(cc)
def PresubmitPromptOrNotify(self, *args, **kwargs):
"""Warn the user when uploading, but only notify if committing."""
if self.is_committing:
return self.PresubmitNotifyResult(*args, **kwargs)
return self.PresubmitPromptWarning(*args, **kwargs)
class InputApi(object):
"""An instance of this object is passed to presubmit scripts so they can
know stuff about the change they're looking at.
"""
# Method could be a function
# pylint: disable=no-self-use
# File extensions that are considered source files from a style guide
# perspective. Don't modify this list from a presubmit script!
#
# Files without an extension aren't included in the list. If you want to
# filter them as source files, add r'(^|.*?[\\\/])[^.]+$' to the allow list.
# Note that ALL CAPS files are skipped in DEFAULT_FILES_TO_SKIP below.
DEFAULT_FILES_TO_CHECK = (
# C++ and friends
r'.+\.c$', r'.+\.cc$', r'.+\.cpp$', r'.+\.h$', r'.+\.m$', r'.+\.mm$',
r'.+\.inl$', r'.+\.asm$', r'.+\.hxx$', r'.+\.hpp$', r'.+\.s$', r'.+\.S$',
# Scripts
r'.+\.js$', r'.+\.py$', r'.+\.sh$', r'.+\.rb$', r'.+\.pl$', r'.+\.pm$',
# Other
r'.+\.java$', r'.+\.mk$', r'.+\.am$', r'.+\.css$', r'.+\.mojom$',
r'.+\.fidl$'
)
# Path regexp that should be excluded from being considered containing source
# files. Don't modify this list from a presubmit script!
DEFAULT_FILES_TO_SKIP = (
r'testing_support[\\\/]google_appengine[\\\/].*',
r'.*\bexperimental[\\\/].*',
# Exclude third_party/.* but NOT third_party/{WebKit,blink}
# (crbug.com/539768 and crbug.com/836555).
r'.*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*',
# Output directories (just in case)
r'.*\bDebug[\\\/].*',
r'.*\bRelease[\\\/].*',
r'.*\bxcodebuild[\\\/].*',
r'.*\bout[\\\/].*',
# All caps files like README and LICENCE.
r'.*\b[A-Z0-9_]{2,}$',
# SCM (can happen in dual SCM configuration). (Slightly over aggressive)
r'(|.*[\\\/])\.git[\\\/].*',
r'(|.*[\\\/])\.svn[\\\/].*',
# There is no point in processing a patch file.
r'.+\.diff$',
r'.+\.patch$',
)
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_WHITE_LIST(self):
return self.DEFAULT_FILES_TO_CHECK
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_WHITE_LIST.setter
def DEFAULT_WHITE_LIST(self, value):
self.DEFAULT_FILES_TO_CHECK = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_ALLOW_LIST(self):
return self.DEFAULT_FILES_TO_CHECK
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_ALLOW_LIST.setter
def DEFAULT_ALLOW_LIST(self, value):
self.DEFAULT_FILES_TO_CHECK = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_BLACK_LIST(self):
return self.DEFAULT_FILES_TO_SKIP
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_BLACK_LIST.setter
def DEFAULT_BLACK_LIST(self, value):
self.DEFAULT_FILES_TO_SKIP = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_BLOCK_LIST(self):
return self.DEFAULT_FILES_TO_SKIP
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_BLOCK_LIST.setter
def DEFAULT_BLOCK_LIST(self, value):
self.DEFAULT_FILES_TO_SKIP = value
def __init__(self, change, presubmit_path, is_committing,
verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
"""Builds an InputApi object.
Args:
change: A presubmit.Change object.
presubmit_path: The path to the presubmit script being processed.
is_committing: True if the change is about to be committed.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
# Version number of the presubmit_support script.
self.version = [int(x) for x in __version__.split('.')]
self.change = change
self.is_committing = is_committing
self.gerrit = gerrit_obj
self.dry_run = dry_run
self.parallel = parallel
self.thread_pool = thread_pool or ThreadPool()
# We expose various modules and functions as attributes of the input_api
# so that presubmit scripts don't have to import them.
self.ast = ast
self.basename = os.path.basename
self.cpplint = cpplint
self.fnmatch = fnmatch
self.gclient_paths = gclient_paths
    # TODO(yyanagisawa): stop exposing this when python3 becomes the default.
# Since python3's tempfile has TemporaryDirectory, we do not need this.
self.temporary_directory = gclient_utils.temporary_directory
self.glob = glob.glob
self.json = json
self.logging = logging.getLogger('PRESUBMIT')
self.os_listdir = os.listdir
self.os_path = os.path
self.os_stat = os.stat
self.os_walk = os.walk
self.re = re
self.subprocess = subprocess
self.sys = sys
self.tempfile = tempfile
self.time = time
self.unittest = unittest
if sys.version_info.major == 2:
self.urllib2 = urllib2
self.urllib_request = urllib_request
self.urllib_error = urllib_error
self.is_windows = sys.platform == 'win32'
# Set python_executable to 'vpython' in order to allow scripts in other
# repos (e.g. src.git) to automatically pick up that repo's .vpython file,
# instead of inheriting the one in depot_tools.
self.python_executable = 'vpython'
# Offer a python 3 executable for use during the migration off of python 2.
self.python3_executable = 'vpython3'
self.environ = os.environ
# InputApi.platform is the platform you're currently running on.
self.platform = sys.platform
self.cpu_count = multiprocessing.cpu_count()
# The local path of the currently-being-processed presubmit script.
self._current_presubmit_path = os.path.dirname(presubmit_path)
# We carry the canned checks so presubmit scripts can easily use them.
self.canned_checks = presubmit_canned_checks
# Temporary files we must manually remove at the end of a run.
self._named_temporary_files = []
self.owners_client = None
if self.gerrit:
self.owners_client = owners_client.GetCodeOwnersClient(
root=change.RepositoryRoot(),
upstream=change.UpstreamBranch(),
host=self.gerrit.host,
project=self.gerrit.project,
branch=self.gerrit.branch)
self.owners_db = owners_db.Database(
change.RepositoryRoot(), fopen=open, os_path=self.os_path)
self.owners_finder = owners_finder.OwnersFinder
self.verbose = verbose
self.Command = CommandData
# Replace <hash_map> and <hash_set> as headers that need to be included
# with 'base/containers/hash_tables.h' instead.
# Access to a protected member _XX of a client class
# pylint: disable=protected-access
self.cpplint._re_pattern_templates = [
(a, b, 'base/containers/hash_tables.h')
if header in ('<hash_map>', '<hash_set>') else (a, b, header)
for (a, b, header) in cpplint._re_pattern_templates
]
def SetTimeout(self, timeout):
self.thread_pool.timeout = timeout
def PresubmitLocalPath(self):
"""Returns the local path of the presubmit script currently being run.
This is useful if you don't want to hard-code absolute paths in the
presubmit script. For example, It can be used to find another file
relative to the PRESUBMIT.py script, so the whole tree can be branched and
the presubmit script still works, without editing its content.
"""
return self._current_presubmit_path
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Same as input_api.change.AffectedFiles() except only lists files
(and optionally directories) in the same directory as the current presubmit
script, or subdirectories thereof. Note that files are listed using the OS
path separator, so backslashes are used as separators on Windows.
"""
dir_with_slash = normpath('%s/' % self.PresubmitLocalPath())
if len(dir_with_slash) == 1:
dir_with_slash = ''
return list(filter(
lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash),
self.change.AffectedFiles(include_deletes, file_filter)))
def LocalPaths(self):
"""Returns local paths of input_api.AffectedFiles()."""
paths = [af.LocalPath() for af in self.AffectedFiles()]
logging.debug('LocalPaths: %s', paths)
return paths
def AbsoluteLocalPaths(self):
"""Returns absolute local paths of input_api.AffectedFiles()."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Same as input_api.change.AffectedTestableFiles() except only lists files
in the same directory as the current presubmit script, or subdirectories
thereof.
"""
if include_deletes is not None:
warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def FilterSourceFile(self,
affected_file,
files_to_check=None,
files_to_skip=None,
allow_list=None,
block_list=None):
"""Filters out files that aren't considered 'source file'.
If files_to_check or files_to_skip is None, InputApi.DEFAULT_FILES_TO_CHECK
    and InputApi.DEFAULT_FILES_TO_SKIP are used, respectively.
    The lists will be compiled as regular expressions; AffectedFile.LocalPath()
    must match files_to_check and must not match files_to_skip.
Note: Copy-paste this function to suit your needs or use a lambda function.
"""
if files_to_check is None:
files_to_check = self.DEFAULT_FILES_TO_CHECK
if files_to_skip is None:
files_to_skip = self.DEFAULT_FILES_TO_SKIP
def Find(affected_file, items):
local_path = affected_file.LocalPath()
for item in items:
if self.re.match(item, local_path):
return True
return False
return (Find(affected_file, files_to_check) and
not Find(affected_file, files_to_skip))
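  # Illustrative example with the defaults above: 'chrome/browser/foo.cc'
  # passes FilterSourceFile, while 'third_party/skia/foo.cc' (a skipped path)
  # and 'docs/readme.txt' (no matching extension) do not.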
def AffectedSourceFiles(self, source_file):
"""Filter the list of AffectedTestableFiles by the function source_file.
If source_file is None, InputApi.FilterSourceFile() is used.
"""
if not source_file:
source_file = self.FilterSourceFile
return list(filter(source_file, self.AffectedTestableFiles()))
def RightHandSideLines(self, source_file_filter=None):
"""An iterator over all text lines in 'new' version of changed files.
Only lists lines from new or modified text files in the change that are
contained by the directory of the currently executing presubmit script.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
Note: The carriage return (LF or CR) is stripped off.
"""
files = self.AffectedSourceFiles(source_file_filter)
return _RightHandSideLinesImpl(files)
def ReadFile(self, file_item, mode='r'):
"""Reads an arbitrary file.
Deny reading anything outside the repository.
"""
if isinstance(file_item, AffectedFile):
file_item = file_item.AbsoluteLocalPath()
if not file_item.startswith(self.change.RepositoryRoot()):
raise IOError('Access outside the repository root is denied.')
return gclient_utils.FileRead(file_item, mode)
def CreateTemporaryFile(self, **kwargs):
"""Returns a named temporary file that must be removed with a call to
RemoveTemporaryFiles().
All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
except for |delete|, which is always set to False.
Presubmit checks that need to create a temporary file and pass it for
reading should use this function instead of NamedTemporaryFile(), as
Windows fails to open a file that is already open for writing.
with input_api.CreateTemporaryFile() as f:
f.write('xyz')
f.close()
input_api.subprocess.check_output(['script-that', '--reads-from',
f.name])
Note that callers of CreateTemporaryFile() should not worry about removing
any temporary file; this is done transparently by the presubmit handling
code.
"""
if 'delete' in kwargs:
# Prevent users from passing |delete|; we take care of file deletion
# ourselves and this prevents unintuitive error messages when we pass
# delete=False and 'delete' is also in kwargs.
raise TypeError('CreateTemporaryFile() does not take a "delete" '
'argument, file deletion is handled automatically by '
'the same presubmit_support code that creates InputApi '
'objects.')
temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
self._named_temporary_files.append(temp_file.name)
return temp_file
@property
def tbr(self):
"""Returns if a change is TBR'ed."""
return 'TBR' in self.change.tags or self.change.TBRsFromDescription()
def RunTests(self, tests_mix, parallel=True):
tests = []
msgs = []
for t in tests_mix:
if isinstance(t, OutputApi.PresubmitResult) and t:
msgs.append(t)
else:
assert issubclass(t.message, _PresubmitResult)
tests.append(t)
if self.verbose:
t.info = _PresubmitNotifyResult
if not t.kwargs.get('cwd'):
t.kwargs['cwd'] = self.PresubmitLocalPath()
self.thread_pool.AddTests(tests, parallel)
# When self.parallel is True (i.e. --parallel is passed as an option)
# RunTests doesn't actually run tests. It adds them to a ThreadPool that
# will run all tests once all PRESUBMIT files are processed.
# Otherwise, it will run them and return the results.
if not self.parallel:
msgs.extend(self.thread_pool.RunAsync())
return msgs
class _DiffCache(object):
"""Caches diffs retrieved from a particular SCM."""
def __init__(self, upstream=None):
"""Stores the upstream revision against which all diffs will be computed."""
self._upstream = upstream
def GetDiff(self, path, local_root):
"""Get the diff for a particular path."""
raise NotImplementedError()
def GetOldContents(self, path, local_root):
"""Get the old version for a particular path."""
raise NotImplementedError()
class _GitDiffCache(_DiffCache):
"""DiffCache implementation for git; gets all file diffs at once."""
def __init__(self, upstream):
super(_GitDiffCache, self).__init__(upstream=upstream)
self._diffs_by_file = None
def GetDiff(self, path, local_root):
if not self._diffs_by_file:
      # Compute a single diff for all files and parse the output; with git
      # this is much faster than computing one diff for each file.
diffs = {}
# Don't specify any filenames below, because there are command line length
# limits on some platforms and GenerateDiff would fail.
unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
branch=self._upstream)
# This regex matches the path twice, separated by a space. Note that
# filename itself may contain spaces.
file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
current_diff = []
keep_line_endings = True
for x in unified_diff.splitlines(keep_line_endings):
match = file_marker.match(x)
if match:
# Marks the start of a new per-file section.
diffs[match.group('filename')] = current_diff = [x]
elif x.startswith('diff --git'):
raise PresubmitFailure('Unexpected diff line: %s' % x)
else:
current_diff.append(x)
self._diffs_by_file = dict(
(normpath(path), ''.join(diff)) for path, diff in diffs.items())
if path not in self._diffs_by_file:
raise PresubmitFailure(
'Unified diff did not contain entry for file %s' % path)
return self._diffs_by_file[path]
def GetOldContents(self, path, local_root):
return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
"""Representation of a file in a change."""
DIFF_CACHE = _DiffCache
# Method could be a function
# pylint: disable=no-self-use
def __init__(self, path, action, repository_root, diff_cache):
self._path = path
self._action = action
self._local_root = repository_root
self._is_directory = None
self._cached_changed_contents = None
self._cached_new_contents = None
self._diff_cache = diff_cache
logging.debug('%s(%s)', self.__class__.__name__, self._path)
def LocalPath(self):
"""Returns the path of this file on the local disk relative to client root.
This should be used for error messages but not for accessing files,
because presubmit checks are run with CWD=PresubmitLocalPath() (which is
often != client root).
"""
return normpath(self._path)
def AbsoluteLocalPath(self):
"""Returns the absolute path of this file on the local disk.
"""
return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))
def Action(self):
"""Returns the action on this opened file, e.g. A, M, D, etc."""
return self._action
def IsTestableFile(self):
"""Returns True if the file is a text file and not a binary file.
Deleted files are not text file."""
raise NotImplementedError() # Implement when needed
def IsTextFile(self):
"""An alias to IsTestableFile for backwards compatibility."""
return self.IsTestableFile()
def OldContents(self):
"""Returns an iterator over the lines in the old version of file.
The old version is the file before any modifications in the user's
workspace, i.e. the 'left hand side'.
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
return self._diff_cache.GetOldContents(self.LocalPath(),
self._local_root).splitlines()
def NewContents(self):
"""Returns an iterator over the lines in the new version of file.
The new version is the file in the user's workspace, i.e. the 'right hand
side'.
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
if self._cached_new_contents is None:
self._cached_new_contents = []
try:
self._cached_new_contents = gclient_utils.FileRead(
self.AbsoluteLocalPath(), 'rU').splitlines()
except IOError:
pass # File not found? That's fine; maybe it was deleted.
except UnicodeDecodeError as e:
# log the filename since we're probably trying to read a binary
# file, and shouldn't be.
print('Error reading %s: %s' % (self.AbsoluteLocalPath(), e))
raise
return self._cached_new_contents[:]
def ChangedContents(self, keeplinebreaks=False):
"""Returns a list of tuples (line number, line text) of all new lines.
This relies on the scm diff output describing each changed code section
with a line of the form
^@@ <old line num>,<old size> <new line num>,<new size> @@$
"""
# Don't return cached results when line breaks are requested.
if not keeplinebreaks and self._cached_changed_contents is not None:
return self._cached_changed_contents[:]
result = []
line_num = 0
# The keeplinebreaks parameter to splitlines must be True or else the
# CheckForWindowsLineEndings presubmit will be a NOP.
for line in self.GenerateScmDiff().splitlines(keeplinebreaks):
m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
if m:
line_num = int(m.groups(1)[0])
continue
if line.startswith('+') and not line.startswith('++'):
result.append((line_num, line[1:]))
if not line.startswith('-'):
line_num += 1
# Don't cache results with line breaks.
if keeplinebreaks:
      return result
self._cached_changed_contents = result
return self._cached_changed_contents[:]
def __str__(self):
return self.LocalPath()
def GenerateScmDiff(self):
return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
class GitAffectedFile(AffectedFile):
"""Representation of a file in a change out of a git checkout."""
# Method 'NNN' is abstract in class 'NNN' but is not overridden
# pylint: disable=abstract-method
DIFF_CACHE = _GitDiffCache
def __init__(self, *args, **kwargs):
AffectedFile.__init__(self, *args, **kwargs)
self._server_path = None
self._is_testable_file = None
def IsTestableFile(self):
if self._is_testable_file is None:
if self.Action() == 'D':
# A deleted file is not testable.
self._is_testable_file = False
else:
self._is_testable_file = os.path.isfile(self.AbsoluteLocalPath())
return self._is_testable_file
class Change(object):
"""Describe a change.
Used directly by the presubmit scripts to query the current change being
tested.
Instance members:
tags: Dictionary of KEY=VALUE pairs found in the change description.
self.KEY: equivalent to tags['KEY']
"""
_AFFECTED_FILES = AffectedFile
# Matches key/value (or 'tag') lines in changelist descriptions.
TAG_LINE_RE = re.compile(
'^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
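  # e.g. a description line 'BUG=123456' yields key='BUG', value='123456'.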
scm = ''
def __init__(
self, name, description, local_root, files, issue, patchset, author,
upstream=None):
if files is None:
files = []
self._name = name
# Convert root into an absolute path.
self._local_root = os.path.abspath(local_root)
self._upstream = upstream
self.issue = issue
self.patchset = patchset
self.author_email = author
self._full_description = ''
self.tags = {}
self._description_without_tags = ''
self.SetDescriptionText(description)
assert all(
(isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files
diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
self._affected_files = [
self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
for action, path in files
]
def UpstreamBranch(self):
"""Returns the upstream branch for the change."""
return self._upstream
def Name(self):
"""Returns the change name."""
return self._name
def DescriptionText(self):
"""Returns the user-entered changelist description, minus tags.
Any line in the user-provided description starting with e.g. 'FOO='
(whitespace permitted before and around) is considered a tag line. Such
lines are stripped out of the description this function returns.
"""
return self._description_without_tags
def FullDescriptionText(self):
"""Returns the complete changelist description including tags."""
return self._full_description
def SetDescriptionText(self, description):
"""Sets the full description text (including tags) to |description|.
Also updates the list of tags."""
self._full_description = description
# From the description text, build up a dictionary of key/value pairs
# plus the description minus all key/value or 'tag' lines.
description_without_tags = []
self.tags = {}
for line in self._full_description.splitlines():
m = self.TAG_LINE_RE.match(line)
if m:
self.tags[m.group('key')] = m.group('value')
else:
description_without_tags.append(line)
# Change back to text and remove whitespace at end.
self._description_without_tags = (
'\n'.join(description_without_tags).rstrip())
def AddDescriptionFooter(self, key, value):
"""Adds the given footer to the change description.
Args:
key: A string with the key for the git footer. It must conform to
the git footers format (i.e. 'List-Of-Tokens') and will be case
normalized so that each token is title-cased.
value: A string with the value for the git footer.
"""
description = git_footers.add_footer(
self.FullDescriptionText(), git_footers.normalize_name(key), value)
self.SetDescriptionText(description)
def RepositoryRoot(self):
"""Returns the repository (checkout) root directory for this change,
as an absolute path.
"""
return self._local_root
def __getattr__(self, attr):
"""Return tags directly as attributes on the object."""
if not re.match(r'^[A-Z_]*$', attr):
raise AttributeError(self, attr)
return self.tags.get(attr)
def GitFootersFromDescription(self):
"""Return the git footers present in the description.
Returns:
footers: A dict of {footer: [values]} containing a multimap of the footers
in the change description.
"""
return git_footers.parse_footers(self.FullDescriptionText())
def BugsFromDescription(self):
"""Returns all bugs referenced in the commit description."""
tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()]
footers = []
parsed = self.GitFootersFromDescription()
unsplit_footers = parsed.get('Bug', []) + parsed.get('Fixed', [])
for unsplit_footer in unsplit_footers:
footers += [b.strip() for b in unsplit_footer.split(',')]
return sorted(set(tags + footers))
def ReviewersFromDescription(self):
"""Returns all reviewers listed in the commit description."""
# We don't support a 'R:' git-footer for reviewers; that is in metadata.
tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
return sorted(set(tags))
def TBRsFromDescription(self):
"""Returns all TBR reviewers listed in the commit description."""
tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
# TODO(crbug.com/839208): Remove support for 'Tbr:' when TBRs are
# programmatically determined by self-CR+1s.
footers = self.GitFootersFromDescription().get('Tbr', [])
return sorted(set(tags + footers))
# TODO(crbug.com/753425): Delete these once we're sure they're unused.
@property
def BUG(self):
return ','.join(self.BugsFromDescription())
@property
def R(self):
return ','.join(self.ReviewersFromDescription())
@property
def TBR(self):
return ','.join(self.TBRsFromDescription())
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
raise NotImplementedError()
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Returns a list of AffectedFile instances for all files in the change.
Args:
include_deletes: If false, deleted files will be filtered out.
file_filter: An additional filter to apply.
Returns:
[AffectedFile(path, action), AffectedFile(path, action)]
"""
affected = list(filter(file_filter, self._affected_files))
if include_deletes:
return affected
return list(filter(lambda x: x.Action() != 'D', affected))
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Return a list of the existing text files in a change."""
if include_deletes is not None:
      warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def LocalPaths(self):
"""Convenience function."""
return [af.LocalPath() for af in self.AffectedFiles()]
def AbsoluteLocalPaths(self):
"""Convenience function."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def RightHandSideLines(self):
"""An iterator over all text lines in 'new' version of changed files.
Lists lines from new or modified text files in the change.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
"""
return _RightHandSideLinesImpl(
x for x in self.AffectedFiles(include_deletes=False)
if x.IsTestableFile())
def OriginalOwnersFiles(self):
"""A map from path names of affected OWNERS files to their old content."""
def owners_file_filter(f):
return 'OWNERS' in os.path.split(f.LocalPath())[1]
files = self.AffectedFiles(file_filter=owners_file_filter)
return dict([(f.LocalPath(), f.OldContents()) for f in files])
class GitChange(Change):
_AFFECTED_FILES = GitAffectedFile
scm = 'git'
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
root = root or self.RepositoryRoot()
return subprocess.check_output(
['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'],
cwd=root).decode('utf-8', 'ignore').splitlines()
def ListRelevantPresubmitFiles(files, root):
"""Finds all presubmit files that apply to a given set of source files.
If inherit-review-settings-ok is present right under root, looks for
PRESUBMIT.py in directories enclosing root.
Args:
files: An iterable container containing file paths.
root: Path where to stop searching.
Return:
List of absolute paths of the existing PRESUBMIT.py scripts.
"""
files = [normpath(os.path.join(root, f)) for f in files]
# List all the individual directories containing files.
directories = set([os.path.dirname(f) for f in files])
# Ignore root if inherit-review-settings-ok is present.
if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
root = None
# Collect all unique directories that may contain PRESUBMIT.py.
candidates = set()
for directory in directories:
while True:
if directory in candidates:
break
candidates.add(directory)
if directory == root:
break
parent_dir = os.path.dirname(directory)
if parent_dir == directory:
# We hit the system root directory.
break
directory = parent_dir
# Look for PRESUBMIT.py in all candidate directories.
results = []
for directory in sorted(list(candidates)):
try:
for f in os.listdir(directory):
p = os.path.join(directory, f)
if os.path.isfile(p) and re.match(
r'PRESUBMIT.*\.py$', f) and not f.startswith('PRESUBMIT_test'):
results.append(p)
except OSError:
pass
logging.debug('Presubmit files: %s', ','.join(results))
return results
class GetTryMastersExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, project, change):
"""Executes GetPreferredTryMasters() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
project: Project name to pass to presubmit script for bot selection.
Return:
A map of try masters to map of builders to set of tests.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'GetPreferredTryMasters'
if function_name not in context:
return {}
get_preferred_try_masters = context[function_name]
if not len(inspect.getargspec(get_preferred_try_masters)[0]) == 2:
raise PresubmitFailure(
'Expected function "GetPreferredTryMasters" to take two arguments.')
return get_preferred_try_masters(project, change)
class GetPostUploadExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, gerrit_obj, change):
"""Executes PostUploadHook() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
gerrit_obj: The GerritAccessor object.
change: The Change object.
Return:
A list of results objects.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'PostUploadHook'
if function_name not in context:
return {}
post_upload_hook = context[function_name]
if not len(inspect.getargspec(post_upload_hook)[0]) == 3:
raise PresubmitFailure(
'Expected function "PostUploadHook" to take three arguments.')
return post_upload_hook(gerrit_obj, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(masters1.items(),
masters2.items()):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.items():
new_builders.setdefault(builder, set([])).update(tests)
return result
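# Illustrative example: _MergeMasters({'m': {'b': {'t1'}}},
#                                     {'m': {'b': {'t2'}, 'b2': set()}})
# returns {'m': {'b': {'t1', 't2'}, 'b2': set()}}.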
def DoGetTryMasters(change,
changed_files,
repository_root,
default_presubmit,
project,
verbose,
output_stream):
"""Get the list of try masters from the presubmit scripts.
Args:
changed_files: List of modified files.
repository_root: The repository root.
default_presubmit: A default presubmit script to execute in any case.
project: Optional name of a project used in selecting trybots.
verbose: Prints debug info.
output_stream: A stream to write debug output to.
Return:
Map of try masters to map of builders to set of tests.
"""
presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root)
if not presubmit_files and verbose:
output_stream.write('Warning, no PRESUBMIT.py found.\n')
results = {}
executer = GetTryMastersExecuter()
if default_presubmit:
if verbose:
output_stream.write('Running default presubmit script.\n')
fake_path = os.path.join(repository_root, 'PRESUBMIT.py')
results = _MergeMasters(results, executer.ExecPresubmitScript(
default_presubmit, fake_path, project, change))
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
output_stream.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results = _MergeMasters(results, executer.ExecPresubmitScript(
presubmit_script, filename, project, change))
# Make sets to lists again for later JSON serialization.
for builders in results.values():
for builder in builders:
builders[builder] = list(builders[builder])
if results and verbose:
output_stream.write('%s\n' % str(results))
return results
def DoPostUploadExecuter(change,
gerrit_obj,
verbose):
"""Execute the post upload hook.
Args:
change: The Change object.
gerrit_obj: The GerritAccessor object.
verbose: Prints debug info.
"""
presubmit_files = ListRelevantPresubmitFiles(
change.LocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
results = []
executer = GetPostUploadExecuter()
# The root presubmit file should be executed after the ones in subdirectories.
# i.e. the specific post upload hooks should run before the general ones.
# Thus, reverse the order provided by ListRelevantPresubmitFiles.
presubmit_files.reverse()
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
sys.stdout.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results.extend(executer.ExecPresubmitScript(
presubmit_script, filename, gerrit_obj, change))
if not results:
return 0
sys.stdout.write('\n')
sys.stdout.write('** Post Upload Hook Messages **\n')
exit_code = 0
for result in results:
if result.fatal:
exit_code = 1
result.handle()
sys.stdout.write('\n')
return exit_code
class PresubmitExecuter(object):
def __init__(self, change, committing, verbose, gerrit_obj, dry_run=None,
thread_pool=None, parallel=False, use_python3=False):
"""
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
use_python3: if true, will use python3 instead of python2 by default
if USE_PYTHON3 is not specified.
"""
self.change = change
self.committing = committing
self.gerrit = gerrit_obj
self.verbose = verbose
self.dry_run = dry_run
self.more_cc = []
self.thread_pool = thread_pool
self.parallel = parallel
self.use_python3 = use_python3
def ExecPresubmitScript(self, script_text, presubmit_path):
"""Executes a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: The path to the presubmit file (this will be reported via
input_api.PresubmitLocalPath()).
Return:
A list of result objects, empty if no problems.
"""
# Change to the presubmit file's directory to support local imports.
main_path = os.getcwd()
presubmit_dir = os.path.dirname(presubmit_path)
os.chdir(presubmit_dir)
# Load the presubmit script into context.
input_api = InputApi(self.change, presubmit_path, self.committing,
self.verbose, gerrit_obj=self.gerrit,
dry_run=self.dry_run, thread_pool=self.thread_pool,
parallel=self.parallel)
output_api = OutputApi(self.committing)
context = {}
# Try to figure out whether these presubmit checks should be run under
# python2 or python3. We need to do this without actually trying to
# compile the text, since the text might compile in one but not the
# other.
m = re.search('^USE_PYTHON3 = (True|False)$', script_text,
flags=re.MULTILINE)
if m:
use_python3 = m.group(1) == 'True'
else:
use_python3 = self.use_python3
if (((sys.version_info.major == 2) and use_python3) or
((sys.version_info.major == 3) and not use_python3)):
return []
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))
context['__args'] = (input_api, output_api)
# Get path of presubmit directory relative to repository root.
# Always use forward slashes, so that path is same in *nix and Windows
root = input_api.change.RepositoryRoot()
rel_path = os.path.relpath(presubmit_dir, root)
rel_path = rel_path.replace(os.path.sep, '/')
# Get the URL of git remote origin and use it to identify host and project
host = project = ''
if self.gerrit:
host = self.gerrit.host or ''
project = self.gerrit.project or ''
# Prefix for test names
prefix = 'presubmit:%s/%s:%s/' % (host, project, rel_path)
# Perform all the desired presubmit checks.
results = []
try:
version = [
int(x) for x in context.get('PRESUBMIT_VERSION', '0.0.0').split('.')
]
with rdb_wrapper.client(prefix) as sink:
if version >= [2, 0, 0]:
for function_name in context:
if not function_name.startswith('Check'):
continue
if function_name.endswith('Commit') and not self.committing:
continue
if function_name.endswith('Upload') and self.committing:
continue
logging.debug('Running %s in %s', function_name, presubmit_path)
results.extend(
self._run_check_function(function_name, context, sink))
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
else: # Old format
if self.committing:
function_name = 'CheckChangeOnCommit'
else:
function_name = 'CheckChangeOnUpload'
if function_name in context:
logging.debug('Running %s in %s', function_name, presubmit_path)
results.extend(
self._run_check_function(function_name, context, sink))
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
finally:
for f in input_api._named_temporary_files:
os.remove(f)
# Return the process to the original working directory.
os.chdir(main_path)
return results
def _run_check_function(self, function_name, context, sink=None):
"""Evaluates and returns the result of a given presubmit function.
If sink is given, the result of the presubmit function will be reported
to the ResultSink.
Args:
function_name: the name of the presubmit function to evaluate
context: a context dictionary in which the function will be evaluated
sink: an instance of ResultSink. None, by default.
Returns:
the result of the presubmit function call.
"""
start_time = time_time()
try:
result = eval(function_name + '(*__args)', context)
self._check_result_type(result)
except Exception:
if sink:
elapsed_time = time_time() - start_time
sink.report(function_name, rdb_wrapper.STATUS_FAIL, elapsed_time)
# TODO(crbug.com/953884): replace reraise with native py3:
# raise .. from e
e_type, e_value, e_tb = sys.exc_info()
print('Evaluation of %s failed: %s' % (function_name, e_value))
six.reraise(e_type, e_value, e_tb)
elapsed_time = time_time() - start_time
if elapsed_time > 10.0:
sys.stdout.write(
'%s took %.1fs to run.\n' % (function_name, elapsed_time))
if sink:
status = rdb_wrapper.STATUS_PASS
if any(r.fatal for r in result):
status = rdb_wrapper.STATUS_FAIL
sink.report(function_name, status, elapsed_time)
return result
def _check_result_type(self, result):
"""Helper function which ensures result is a list, and all elements are
instances of OutputApi.PresubmitResult"""
if not isinstance(result, (tuple, list)):
raise PresubmitFailure('Presubmit functions must return a tuple or list')
if not all(isinstance(res, OutputApi.PresubmitResult) for res in result):
raise PresubmitFailure(
'All presubmit results must be of types derived from '
'output_api.PresubmitResult')
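# Illustrative sketch (not part of depot_tools): the shape of a new-style
# (PRESUBMIT_VERSION >= 2.0.0) check that the executer above would discover. Any
# module-level function in a PRESUBMIT.py whose name starts with 'Check' is run; a
# 'Commit'/'Upload' suffix restricts it to that phase. The check below is a
# hypothetical example, not a real Chromium check.
def CheckDescriptionOnUpload(input_api, output_api):
  if input_api.change.DescriptionText().strip():
    return []
  return [output_api.PresubmitError('Please add a CL description.')]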
def DoPresubmitChecks(change,
committing,
verbose,
default_presubmit,
may_prompt,
gerrit_obj,
dry_run=None,
parallel=False,
json_output=None,
use_python3=False):
"""Runs all presubmit checks that apply to the files in the change.
This finds all PRESUBMIT.py files in directories enclosing the files in the
change (up to the repository root) and calls the relevant entrypoint function
depending on whether the change is being committed or uploaded.
Prints errors, warnings and notifications. Prompts the user for warnings
when needed.
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
verbose: Prints debug info.
default_presubmit: A default presubmit script to execute in any case.
may_prompt: Enable (y/n) questions on warning or error. If False,
any questions are answered with yes by default.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests specified by input_api.RunTests in all
PRESUBMIT files will be run in parallel.
use_python3: if true, default to using Python3 for presubmit checks
rather than Python2.
Return:
1 if presubmit checks failed or 0 otherwise.
"""
old_environ = os.environ
try:
# Make sure python subprocesses won't generate .pyc files.
os.environ = os.environ.copy()
os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
python_version = 'Python %s' % sys.version_info.major
if committing:
sys.stdout.write('Running %s presubmit commit checks ...\n' %
python_version)
else:
sys.stdout.write('Running %s presubmit upload checks ...\n' %
python_version)
start_time = time_time()
presubmit_files = ListRelevantPresubmitFiles(
change.AbsoluteLocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
results = []
thread_pool = ThreadPool()
executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
dry_run, thread_pool, parallel, use_python3)
if default_presubmit:
if verbose:
sys.stdout.write('Running default presubmit script.\n')
fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
results += executer.ExecPresubmitScript(default_presubmit, fake_path)
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
sys.stdout.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results += executer.ExecPresubmitScript(presubmit_script, filename)
results += thread_pool.RunAsync()
messages = {}
should_prompt = False
presubmits_failed = False
for result in results:
if result.fatal:
presubmits_failed = True
messages.setdefault('ERRORS', []).append(result)
elif result.should_prompt:
should_prompt = True
messages.setdefault('Warnings', []).append(result)
else:
messages.setdefault('Messages', []).append(result)
sys.stdout.write('\n')
for name, items in messages.items():
sys.stdout.write('** Presubmit %s **\n' % name)
for item in items:
item.handle()
sys.stdout.write('\n')
total_time = time_time() - start_time
if total_time > 1.0:
sys.stdout.write(
'Presubmit checks took %.1fs to calculate.\n\n' % total_time)
if not should_prompt and not presubmits_failed:
sys.stdout.write('%s presubmit checks passed.\n' % python_version)
elif should_prompt:
sys.stdout.write('There were %s presubmit warnings. ' % python_version)
if may_prompt:
presubmits_failed = not prompt_should_continue(
'Are you sure you wish to continue? (y/N): ')
else:
sys.stdout.write('\n')
if json_output:
# Write the presubmit results to json output
presubmit_results = {
'errors': [
error.json_format()
for error in messages.get('ERRORS', [])
],
'notifications': [
notification.json_format()
for notification in messages.get('Messages', [])
],
'warnings': [
warning.json_format()
for warning in messages.get('Warnings', [])
],
'more_cc': executer.more_cc,
}
gclient_utils.FileWrite(
json_output, json.dumps(presubmit_results, sort_keys=True))
global _ASKED_FOR_FEEDBACK
# Ask for feedback one time out of 5.
if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
sys.stdout.write(
'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
'to figure out which PRESUBMIT.py was run, then run git blame\n'
'on the file to figure out who to ask for help.\n')
_ASKED_FOR_FEEDBACK = True
return 1 if presubmits_failed else 0
finally:
os.environ = old_environ
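# Hedged sketch: how a caller might consume the file written via --json_output above.
# The key names mirror the dict assembled in DoPresubmitChecks; the path argument is a
# placeholder, and this helper is illustrative only.
def _example_read_presubmit_json(path):
  import json
  with open(path) as f:
    data = json.load(f)
  for error in data.get('errors', []):
    print('ERROR: %r' % error)
  return data.get('more_cc', [])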
def _scan_sub_dirs(mask, recursive):
if not recursive:
return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
results = []
for root, dirs, files in os.walk('.'):
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
for name in files:
if fnmatch.fnmatch(name, mask):
results.append(os.path.join(root, name))
return results
def _parse_files(args, recursive):
logging.debug('Searching for %s', args)
files = []
for arg in args:
files.extend([('M', f) for f in _scan_sub_dirs(arg, recursive)])
return files
def _parse_change(parser, options):
"""Process change options.
Args:
parser: The parser used to parse the arguments from command line.
options: The arguments parsed from command line.
Returns:
A GitChange if the change root is a git repository, or a Change otherwise.
"""
if options.files and options.all_files:
parser.error('<files> cannot be specified when --all-files is set.')
change_scm = scm.determine_scm(options.root)
if change_scm != 'git' and not options.files:
parser.error('<files> is not optional for unversioned directories.')
if options.files:
change_files = _parse_files(options.files, options.recursive)
elif options.all_files:
change_files = [('M', f) for f in scm.GIT.GetAllFiles(options.root)]
else:
change_files = scm.GIT.CaptureStatus(
options.root, options.upstream or None)
logging.info('Found %d file(s).', len(change_files))
change_class = GitChange if change_scm == 'git' else Change
return change_class(
options.name,
options.description,
options.root,
change_files,
options.issue,
options.patchset,
options.author,
upstream=options.upstream)
def _parse_gerrit_options(parser, options):
"""Process gerrit options.
SIDE EFFECTS: Modifies options.author and options.description from Gerrit if
options.gerrit_fetch is set.
Args:
parser: The parser used to parse the arguments from command line.
options: The arguments parsed from command line.
Returns:
A GerritAccessor object if options.gerrit_url is set, or None otherwise.
"""
gerrit_obj = None
if options.gerrit_url:
gerrit_obj = GerritAccessor(
url=options.gerrit_url,
project=options.gerrit_project,
branch=options.gerrit_branch)
if not options.gerrit_fetch:
return gerrit_obj
if not options.gerrit_url or not options.issue or not options.patchset:
parser.error(
'--gerrit_fetch requires --gerrit_url, --issue and --patchset.')
options.author = gerrit_obj.GetChangeOwner(options.issue)
options.description = gerrit_obj.GetChangeDescription(
options.issue, options.patchset)
logging.info('Got author: "%s"', options.author)
logging.info('Got description: """\n%s\n"""', options.description)
return gerrit_obj
@contextlib.contextmanager
def canned_check_filter(method_names):
filtered = {}
try:
for method_name in method_names:
if not hasattr(presubmit_canned_checks, method_name):
logging.warning('Skipping unknown "canned" check %s' % method_name)
continue
filtered[method_name] = getattr(presubmit_canned_checks, method_name)
setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
yield
finally:
for name, method in filtered.items():
setattr(presubmit_canned_checks, name, method)
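# Hedged usage sketch for canned_check_filter: temporarily no-ops the named canned
# checks while presubmits run, mirroring what --skip_canned does in main() below.
# 'CheckChangeHasDescription' is used purely as an example name.
def _example_skip_canned_check(change, gerrit_obj):
  with canned_check_filter(['CheckChangeHasDescription']):
    return DoPresubmitChecks(change, False, False, None, False, gerrit_obj)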
def main(argv=None):
parser = argparse.ArgumentParser(usage='%(prog)s [options] <files...>')
hooks = parser.add_mutually_exclusive_group()
hooks.add_argument('-c', '--commit', action='store_true',
help='Use commit instead of upload checks.')
hooks.add_argument('-u', '--upload', action='store_false', dest='commit',
help='Use upload instead of commit checks.')
hooks.add_argument('--post_upload', action='store_true',
help='Run post-upload commit hooks.')
parser.add_argument('-r', '--recursive', action='store_true',
help='Act recursively.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Use 2 times for more debug info.')
parser.add_argument('--name', default='no name')
parser.add_argument('--author')
desc = parser.add_mutually_exclusive_group()
desc.add_argument('--description', default='', help='The change description.')
desc.add_argument('--description_file',
help='File to read change description from.')
parser.add_argument('--issue', type=int, default=0)
parser.add_argument('--patchset', type=int, default=0)
parser.add_argument('--root', default=os.getcwd(),
help='Search for PRESUBMIT.py up to this directory. '
'If inherit-review-settings-ok is present in this '
'directory, parent directories up to the root file '
'system directories will also be searched.')
parser.add_argument('--upstream',
help='Git only: the base ref or upstream branch against '
'which the diff should be computed.')
parser.add_argument('--default_presubmit')
parser.add_argument('--may_prompt', action='store_true', default=False)
parser.add_argument('--skip_canned', action='append', default=[],
help='A list of checks to skip which appear in '
'presubmit_canned_checks. Can be provided multiple times '
'to skip multiple canned checks.')
parser.add_argument('--dry_run', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_url', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_project', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_branch', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_fetch', action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--parallel', action='store_true',
help='Run all tests specified by input_api.RunTests in '
'all PRESUBMIT files in parallel.')
parser.add_argument('--json_output',
help='Write presubmit errors to json output.')
parser.add_argument('--all_files', action='store_true',
help='Mark all files under source control as modified.')
parser.add_argument('files', nargs='*',
help='List of files to be marked as modified when '
'executing presubmit or post-upload hooks. fnmatch '
'wildcards can also be used.')
parser.add_argument('--use-python3', action='store_true',
help='Use python3 for presubmit checks by default')
options = parser.parse_args(argv)
log_level = logging.ERROR
if options.verbose >= 2:
log_level = logging.DEBUG
elif options.verbose:
log_level = logging.INFO
log_format = ('[%(levelname).1s%(asctime)s %(process)d %(thread)d '
'%(filename)s] %(message)s')
logging.basicConfig(format=log_format, level=log_level)
if options.description_file:
options.description = gclient_utils.FileRead(options.description_file)
gerrit_obj = _parse_gerrit_options(parser, options)
change = _parse_change(parser, options)
try:
if options.post_upload:
return DoPostUploadExecuter(
change,
gerrit_obj,
options.verbose)
with canned_check_filter(options.skip_canned):
return DoPresubmitChecks(
change,
options.commit,
options.verbose,
options.default_presubmit,
options.may_prompt,
gerrit_obj,
options.dry_run,
options.parallel,
options.json_output,
options.use_python3)
except PresubmitFailure as e:
print(e, file=sys.stderr)
print('Maybe your depot_tools is out of date?', file=sys.stderr)
return 2
if __name__ == '__main__':
fix_encoding.fix_encoding()
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(2)
|
game_controller.py
|
import os
import threading
import time
import cv2
from utils.auto_settings import check_settings
from bot import Bot
from config import Config
from death_manager import DeathManager
from game_recovery import GameRecovery
from game_stats import GameStats
from health_manager import HealthManager
from logger import Logger
from messenger import Messenger
from screen import Screen
from ui.char_selector import CharSelector
from utils.misc import kill_thread, set_d2r_always_on_top, restore_d2r_window_visibility
from utils.restart import restart_game
class GameController:
is_running = False
def __init__(self, config: Config):
self._config = config
self.screen = None
self.health_monitor_thread = None
self.health_manager = None
self.death_manager = None
self.death_monitor_thread = None
self.game_recovery = None
self.game_stats = None
self.game_controller_thread = None
self.bot_thread = None
self.bot = None
self.char_selector = None
def run_bot(self, pick_corpse: bool = False):
# Make sure the correct char is selected
if self.char_selector.has_char_template_saved():
Logger.info("Selecting original char")
self.char_selector.select_char()
else:
Logger.info("Saving top-most char as template")
self.char_selector.save_char_template()
# Start bot thread
self.bot = Bot(self.screen, self.game_stats, pick_corpse)
self.bot_thread = threading.Thread(target=self.bot.start)
self.bot_thread.daemon = True
self.bot_thread.start()
# Register that thread to the death and health manager so they can stop the bot thread if needed
self.death_manager.set_callback(lambda: self.bot.stop() or kill_thread(self.bot_thread))
self.health_manager.set_callback(lambda: self.bot.stop() or kill_thread(self.bot_thread))
self.health_manager.set_belt_manager(self.bot.get_belt_manager())
do_restart = False
messenger = Messenger()
while 1:
self.health_manager.update_location(self.bot.get_curr_location())
max_game_length_reached = self.game_stats.get_current_game_length() > self._config.general["max_game_length_s"]
if max_game_length_reached or self.death_manager.died() or self.health_manager.did_chicken():
# Some debug and logging
if max_game_length_reached:
Logger.info(f"Max game length reached. Attempting to restart {self._config.general['name']}!")
if self._config.general["info_screenshots"]:
cv2.imwrite("./info_screenshots/info_max_game_length_reached_" + time.strftime("%Y%m%d_%H%M%S") + ".png", self.screen.grab())
elif self.death_manager.died():
self.game_stats.log_death(self.death_manager._last_death_screenshot)
elif self.health_manager.did_chicken():
self.game_stats.log_chicken(self.health_manager._last_chicken_screenshot)
self.bot.stop()
kill_thread(self.bot_thread)
# Try to recover from whatever situation we are and go back to hero selection
do_restart = self.game_recovery.go_to_hero_selection()
break
time.sleep(0.5)
self.bot_thread.join()
if do_restart:
# Reset flags before running a new bot
self.death_manager.reset_death_flag()
self.health_manager.reset_chicken_flag()
self.game_stats.log_end_game(failed=max_game_length_reached)
return self.run_bot(True)
else:
if self._config.general["info_screenshots"]:
cv2.imwrite("./info_screenshots/info_could_not_recover_" + time.strftime("%Y%m%d_%H%M%S") + ".png", self.screen.grab())
Logger.error(
f"{self._config.general['name']} could not recover from a max game length violation. Restarting the Game.")
if self._config.general["custom_message_hook"]:
messenger.send_message(f"{self._config.general['name']}: got stuck and will now restart D2R")
if restart_game(self._config.general["d2r_path"]):
self.game_stats.log_end_game(failed=max_game_length_reached)
if self.setup_screen():
self.start_health_manager_thread()
self.start_death_manager_thread()
self.game_recovery = GameRecovery(self.screen, self.death_manager)
return self.run_bot(True)
Logger.error(f"{self._config.general['name']} could not restart the game. Quitting.")
messenger.send_message("Got stuck and could not restart the game. Quitting.")
os._exit(1)
def start(self):
        # Check if the user should update the D2R settings
diff = check_settings(self._config)
if len(diff) > 0:
Logger.warning("Your D2R settings differ from the requiered ones. Please use Auto Settings to adjust them. The differences are:")
Logger.warning(f"{diff}")
if self._config.advanced_options['d2r_windows_always_on_top']:
set_d2r_always_on_top()
self.setup_screen()
self.start_health_manager_thread()
self.start_death_manager_thread()
self.game_recovery = GameRecovery(self.screen, self.death_manager)
self.game_stats = GameStats()
self.char_selector = CharSelector(self.screen, self._config)
self.start_game_controller_thread()
GameController.is_running = True
def stop(self):
if self._config.advanced_options['d2r_windows_always_on_top']:
restore_d2r_window_visibility()
if self.death_monitor_thread: kill_thread(self.death_monitor_thread)
if self.health_monitor_thread: kill_thread(self.health_monitor_thread)
if self.bot_thread: kill_thread(self.bot_thread)
if self.game_controller_thread: kill_thread(self.game_controller_thread)
GameController.is_running = False
def setup_screen(self):
self.screen = Screen(self._config.general["monitor"])
if self.screen.found_offsets:
return True
return False
def start_health_manager_thread(self):
# Run health monitor thread
self.health_manager = HealthManager(self.screen)
self.health_monitor_thread = threading.Thread(target=self.health_manager.start_monitor)
self.health_monitor_thread.daemon = True
self.health_monitor_thread.start()
def start_death_manager_thread(self):
# Run death monitor thread
self.death_manager = DeathManager(self.screen)
self.death_monitor_thread = threading.Thread(target=self.death_manager.start_monitor)
self.death_monitor_thread.daemon = True
self.death_monitor_thread.start()
def start_game_controller_thread(self):
# Run game controller thread
self.game_controller_thread = threading.Thread(target=self.run_bot)
self.game_controller_thread.daemon = False
self.game_controller_thread.start()
def toggle_pause_bot(self):
if self.bot: self.bot.toggle_pause()
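# Hedged usage sketch (illustrative only, not called anywhere): how an entry point
# might drive GameController. Config() taking no arguments is an assumption about its
# constructor.
def _example_run_game_controller():
    config = Config()
    controller = GameController(config)
    controller.start()  # sets up the screen, health/death monitors and the bot thread
    return controller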
|
loader_wsl.py
|
"""Detectron data loader. The design is generic and abstracted away from any
details of the minibatch. A minibatch is a dictionary of blob name keys and
their associated numpy (float32 or int32) ndarray values.
Outline of the data loader design:
loader thread\
loader thread \ / GPU 1 enqueue thread -> feed -> EnqueueOp
... -> minibatch queue -> ...
loader thread / \ GPU N enqueue thread -> feed -> EnqueueOp
loader thread/
<---------------------------- CPU -----------------------------|---- GPU ---->
A pool of loader threads construct minibatches that are put onto the shared
minibatch queue. Each GPU has an enqueue thread that pulls a minibatch off the
minibatch queue, feeds the minibatch blobs into the workspace, and then runs
an EnqueueBlobsOp to place the minibatch blobs into the GPU's blobs queue.
During each fprop the first thing the network does is run a DequeueBlobsOp
in order to populate the workspace with the blobs from a queued minibatch.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import deque
from collections import OrderedDict
import logging
import numpy as np
import numpy.random as npr
import signal
import threading
import time
import uuid
import random
from six.moves import queue as Queue
from caffe2.python import core, workspace
from detectron.core.config import cfg
from detectron.roi_data.minibatch_wsl import get_minibatch
from detectron.roi_data.minibatch_wsl import get_minibatch_blob_names
from detectron.utils.coordinator import coordinated_get
from detectron.utils.coordinator import coordinated_put
from detectron.utils.coordinator import Coordinator
import detectron.utils.c2 as c2_utils
logger = logging.getLogger(__name__)
class RoIDataLoader(object):
def __init__(
self,
roidb,
num_loaders=4,
minibatch_queue_size=64,
blobs_queue_capacity=8
):
self._roidb = roidb
self._lock = threading.Lock()
self._perm = deque(range(len(self._roidb)))
self._cur = 0 # _perm cursor
# The minibatch queue holds prepared training data in host (CPU) memory
# When training with N > 1 GPUs, each element in the minibatch queue
# is actually a partial minibatch which contributes 1 / N of the
# examples to the overall minibatch
self._minibatch_queue = Queue.Queue(maxsize=minibatch_queue_size)
self._blobs_queue_capacity = blobs_queue_capacity
        # Random queue name in case one instantiates multiple RoIDataLoaders
self._loader_id = uuid.uuid4()
self._blobs_queue_name = 'roi_blobs_queue_{}'.format(self._loader_id)
# Loader threads construct (partial) minibatches and put them on the
# minibatch queue
self._num_loaders = num_loaders
self._num_gpus = cfg.NUM_GPUS
self.coordinator = Coordinator()
if cfg.WEBLY.WEBLY_ON and cfg.WEBLY.BAGGING_MIXUP:
self._class2idx = {}
for im_i, entry in enumerate(self._roidb):
if im_i % 1000 == 0:
logger.info(' {:d}/{:d}'.format(im_i, len(self._roidb)))
gt_inds = np.where(entry['gt_classes'] > 0)[0]
# print(gt_inds, entry)
# assert len(gt_inds) == 1, 'Only one ground truth for image is allowed.'
gt_classes = entry['gt_classes'][gt_inds].copy()
if gt_classes[0] not in self._class2idx.keys():
self._class2idx[gt_classes[0]] = []
self._class2idx[gt_classes[0]].append(im_i)
self._output_names = get_minibatch_blob_names()
self._shuffle_roidb_inds()
self.create_threads()
def minibatch_loader_thread(self):
"""Load mini-batches and put them onto the mini-batch queue."""
with self.coordinator.stop_on_exception():
while not self.coordinator.should_stop():
blobs = self.get_next_minibatch()
# Blobs must be queued in the order specified by
# self.get_output_names
ordered_blobs = OrderedDict()
for key in self.get_output_names():
assert blobs[key].dtype in (np.int32, np.float32), \
'Blob {} of dtype {} must have dtype of ' \
'np.int32 or np.float32'.format(key, blobs[key].dtype)
ordered_blobs[key] = blobs[key]
coordinated_put(
self.coordinator, self._minibatch_queue, ordered_blobs
)
logger.info('Stopping mini-batch loading thread')
def enqueue_blobs_thread(self, gpu_id, blob_names):
"""Transfer mini-batches from a mini-batch queue to a BlobsQueue."""
with self.coordinator.stop_on_exception():
while not self.coordinator.should_stop():
                if self._minibatch_queue.qsize() == 0:
logger.warning('Mini-batch queue is empty')
blobs = coordinated_get(self.coordinator, self._minibatch_queue)
self.enqueue_blobs(gpu_id, blob_names, blobs.values())
logger.debug(
'batch queue size {}'.format(self._minibatch_queue.qsize())
)
logger.info('Stopping enqueue thread')
def get_next_minibatch(self):
"""Return the blobs to be used for the next minibatch. Thread safe."""
valid = False
while not valid:
db_inds = self._get_next_minibatch_inds()
bmp_prob = np.random.random()
if cfg.WEBLY.WEBLY_ON and cfg.WEBLY.BAGGING_MIXUP and bmp_prob > 0.8:
gt_inds = np.where(self._roidb[db_inds[0]]['gt_classes'] > 0)[0]
gt_classes = self._roidb[db_inds[0]]['gt_classes'][gt_inds].copy()
key = gt_classes[0]
population = self._class2idx[key]
im_idx = random.sample(population, 1)
db_inds.extend(im_idx)
minibatch_db = [self._roidb[i] for i in db_inds]
blobs, valid = get_minibatch(minibatch_db)
if cfg.WEBLY.WEBLY_ON and cfg.WEBLY.BAGGING_MIXUP and bmp_prob > 0.8:
alpha = cfg.WEBLY.BAGGING_MIXUP_ALPHA
lams = []
lam = npr.beta(alpha, alpha)
lams.append(lam)
lams.append(1 - lam)
# for i in range(cfg.TRAIN.IMS_PER_BATCH - 1):
# lam = npr.beta(alpha, alpha)
blobs_data = blobs['data']
blobs_labels_oh = blobs['labels_oh']
blobs['data'] = np.zeros((1, ) + blobs_data.shape[1:], dtype=np.float32)
blobs['labels_oh'] = np.zeros((1, ) + blobs_labels_oh.shape[1:], dtype=np.float32)
for i in range(2):
blobs['data'] += lams[i] * blobs_data[i:i + 1]
blobs['labels_oh'] += lams[i] * blobs_labels_oh[i:i + 1]
blobs['rois'][:, 0] = 0
blobs['data_ids'] = blobs['data_ids'][0:1]
blobs['labels_int32'] = blobs['labels_int32'][0:1]
return blobs
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb. Not thread safe."""
if cfg.TRAIN.ASPECT_GROUPING:
widths = np.array([r['width'] for r in self._roidb])
heights = np.array([r['height'] for r in self._roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
horz_inds = np.random.permutation(horz_inds)
vert_inds = np.random.permutation(vert_inds)
mb = cfg.TRAIN.IMS_PER_BATCH
horz_inds = horz_inds[:(len(horz_inds) // mb) * mb]
vert_inds = vert_inds[:(len(vert_inds) // mb) * mb]
inds = np.hstack((horz_inds, vert_inds))
inds = np.reshape(inds, (-1, mb))
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1, ))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._perm = deque(self._perm)
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch. Thread safe."""
with self._lock:
# We use a deque and always take the *first* IMS_PER_BATCH items
# followed by *rotating* the deque so that we see fresh items
# each time. If the length of _perm is not divisible by
# IMS_PER_BATCH, then we end up wrapping around the permutation.
db_inds = [self._perm[i] for i in range(cfg.TRAIN.IMS_PER_BATCH)]
self._perm.rotate(-cfg.TRAIN.IMS_PER_BATCH)
self._cur += cfg.TRAIN.IMS_PER_BATCH
if self._cur >= len(self._perm):
self._shuffle_roidb_inds()
return db_inds
def get_output_names(self):
return self._output_names
def enqueue_blobs(self, gpu_id, blob_names, blobs):
"""Put a mini-batch on a BlobsQueue."""
assert len(blob_names) == len(blobs)
t = time.time()
dev = c2_utils.CudaDevice(gpu_id)
queue_name = 'gpu_{}/{}'.format(gpu_id, self._blobs_queue_name)
blob_names = ['gpu_{}/{}'.format(gpu_id, b) for b in blob_names]
for (blob_name, blob) in zip(blob_names, blobs):
workspace.FeedBlob(blob_name, blob, device_option=dev)
logger.debug(
'enqueue_blobs {}: workspace.FeedBlob: {}'.
format(gpu_id, time.time() - t)
)
t = time.time()
op = core.CreateOperator(
'SafeEnqueueBlobs', [queue_name] + blob_names,
blob_names + [queue_name + '_enqueue_status'],
device_option=dev
)
workspace.RunOperatorOnce(op)
logger.debug(
'enqueue_blobs {}: workspace.RunOperatorOnce: {}'.
format(gpu_id, time.time() - t)
)
def create_threads(self):
# Create mini-batch loader threads, each of which builds mini-batches
# and places them into a queue in CPU memory
self._workers = [
threading.Thread(target=self.minibatch_loader_thread)
for _ in range(self._num_loaders)
]
# Create one BlobsQueue per GPU
# (enqueue_blob_names are unscoped)
enqueue_blob_names = self.create_blobs_queues()
# Create one enqueuer thread per GPU
self._enqueuers = [
threading.Thread(
target=self.enqueue_blobs_thread,
args=(gpu_id, enqueue_blob_names)
) for gpu_id in range(self._num_gpus)
]
def start(self, prefill=False):
for w in self._workers + self._enqueuers:
w.setDaemon(True)
w.start()
if prefill:
logger.info('Pre-filling mini-batch queue...')
while not self._minibatch_queue.full():
logger.info(
' [{:d}/{:d}]'.format(
self._minibatch_queue.qsize(),
self._minibatch_queue.maxsize
)
)
time.sleep(0.1)
# Detect failure and shutdown
if self.coordinator.should_stop():
self.shutdown()
break
def has_stopped(self):
return self.coordinator.should_stop()
def shutdown(self):
self.coordinator.request_stop()
self.coordinator.wait_for_stop()
self.close_blobs_queues()
for w in self._workers + self._enqueuers:
w.join()
def create_blobs_queues(self):
"""Create one BlobsQueue for each GPU to hold mini-batches."""
for gpu_id in range(self._num_gpus):
with c2_utils.GpuNameScope(gpu_id):
workspace.RunOperatorOnce(
core.CreateOperator(
'CreateBlobsQueue', [], [self._blobs_queue_name],
num_blobs=len(self.get_output_names()),
capacity=self._blobs_queue_capacity
)
)
return self.create_enqueue_blobs()
def close_blobs_queues(self):
"""Close a BlobsQueue."""
for gpu_id in range(self._num_gpus):
with core.NameScope('gpu_{}'.format(gpu_id)):
workspace.RunOperatorOnce(
core.CreateOperator(
'CloseBlobsQueue', [self._blobs_queue_name], []
)
)
def create_enqueue_blobs(self):
blob_names = self.get_output_names()
enqueue_blob_names = [
'{}_enqueue_{}'.format(b, self._loader_id) for b in blob_names
]
for gpu_id in range(self._num_gpus):
with c2_utils.NamedCudaScope(gpu_id):
for blob in enqueue_blob_names:
workspace.CreateBlob(core.ScopedName(blob))
return enqueue_blob_names
def register_sigint_handler(self):
def signal_handler(signal, frame):
logger.info(
'SIGINT: Shutting down RoIDataLoader threads and exiting...'
)
self.shutdown()
signal.signal(signal.SIGINT, signal_handler)
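# Hedged usage sketch (not part of the original Detectron file): the typical start-up
# sequence for the loader, assuming `roidb` was built by the usual dataset code.
def _example_start_loader(roidb):
    loader = RoIDataLoader(roidb, num_loaders=4, minibatch_queue_size=64)
    loader.register_sigint_handler()
    loader.start(prefill=True)  # returns once the CPU-side mini-batch queue is full
    return loader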
|
RedisScheduler.py
|
import redis, json, multiprocessing, pytz, uuid, boto3, botocore
from datetime import datetime, timezone
import dateutil.parser
from dateutil.tz import *
class RedisScheduler:
    def __init__(self, host='localhost', port=6379, path=None, db=0, password=None):
        try:
            # db/password/unix socket path are passed to the client constructor;
            # redis-py clients do not expose select()/auth() methods for this.
            if path:
                self.redis_client = redis.StrictRedis(unix_socket_path=path, db=db, password=password)
            else:
                self.redis_client = redis.StrictRedis(host=host, port=port, db=db, password=password)
            print(' -- Redis Connection Success -- ')
        except Exception as e:
            print(' -- Redis Connection Failed -- ')
            print(e)
def add_key(self, key, value, ttl=604800):
try:
key_added = self.redis_client.set(key, '', ex=ttl)
shadow_key_added = self.redis_client.set('_' + key, value)
except Exception as e:
print(e)
print(' -- Error while setting key -- ')
key_added = False
return key_added
def register_event(self, value, expiry_time):
response = False
try:
if int(expiry_time) == 0:
print('now event', value)
# sqs_response = self.boto3_client.send_message(
# QueueUrl=self.queue_url,
# MessageBody=json.dumps(value)
# )
# print(sqs_response)
else:
ttl = int(self.get_timedelta(expiry_time))
if ttl > 0:
key = 'emails_'+str(uuid.uuid1())
response = self.redis_client.set(key, "0", ex=ttl)
shadow_key_added = self.redis_client.set('_' + key, value)
# print(response)
except Exception as e:
print(e)
print(' -- Error while setting key -- ')
return response
def register_event_key(self, value, expiry_time, event_key='emails'):
response = False
try:
ttl = int(self.get_timedelta(expiry_time))
if ttl>0:
key = event_key+'_'+str(uuid.uuid1())
response = self.redis_client.set(key, "0", ex=ttl)
shadow_key_added = self.redis_client.set('_' + key, value)
print(response)
except Exception as e:
print(e)
print(' -- Error while setting key -- ')
return response
def modify_event(self, key, value, scheduled_time):
response = False
try:
ttl = int(self.get_timedelta(scheduled_time))
if ttl>0:
                # Refresh the key's TTL and update the shadow key if it still exists
                check_redis_key = self.redis_client.get(key)
                if check_redis_key:
                    redis_key = self.redis_client.set(key, value, ex=ttl)
                    shadow_key_added = self.redis_client.set('_' + key, value)
else:
if key.startswith("emails_"):
self.register_event(value, scheduled_time)
else:
self.register_event_key(value, scheduled_time, key)
except Exception as e:
print(e)
print(' -- Error while setting key -- ')
return response
def subscribe_event(self, subscribe_channel='__keyevent@0__:expired', handler='sqs'):
# print(subscribe_channel, handler)
try:
pubsub_client = self.redis_client.pubsub()
pubsub_client.subscribe(subscribe_channel)
for message in pubsub_client.listen():
expired_key = self.get_key(message['data'])
shadow_key = '_%s' % expired_key
try:
if shadow_key:
expired_key_value = self.redis_client.get(shadow_key)
if expired_key_value:
                            expired_key_json = json.loads(expired_key_value.decode('utf-8'))
if expired_key_json:
if expired_key.startswith("emails_"):
self.send_to_sqs(expired_key_json)
elif expired_key.startswith("checkpoint_dependency_"):
self.send_to_redis_tasks(expired_key_json)
except Exception as e:
print(e)
if shadow_key:
self.redis_client.delete(shadow_key)
except Exception as e:
print(e)
def get_key(self, s):
string = s
try:
if isinstance(s, bytes):
string = s.decode('utf-8')
except Exception as e:
print(e)
print(' -- in Exception -- ')
return string
def start_listening(self, subscribe_channel='__keyevent@0__:expired', handler='sqs'):
print(' -- listener initiating -- ')
print(subscribe_channel, handler)
try:
# listener_service = multiprocessing.Process(target=self.subscribe_event, args=(subscribe_channel, handler,))
# listener_service.start()
self.subscribe_event(subscribe_channel, handler)
except Exception as e:
print(e)
print(' -- listener initiated -- ')
def get_timedelta(self, timestamp):
# current_time = datetime.now(pytz.timezone('UTC')).strftime('%Y-%m-%d %H:%M:%S%z')
current_time = datetime.now(tzutc())
# parsed_timestamp = datetime.strptime(''.join(timestamp.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S%z')
parsed_timestamp = dateutil.parser.parse(timestamp)
# parsed_timestamp_in_utc = datetime_obj.astimezone(tz=timezone.utc)
parsed_timestamp_in_utc = parsed_timestamp.astimezone(tzutc())
return (parsed_timestamp_in_utc-current_time).total_seconds()
def set_sqs_keys(self, access_key, secret_key, queue_name, region='ap-south-1'):
try:
self.boto3_client = boto3.client(
'sqs',
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
region_name=region
)
self.queue_url = self.boto3_client.get_queue_url(QueueName=queue_name)['QueueUrl']
except Exception as e:
print(e)
print('^^ Some Error in connection to aws sqs ^^')
def send_to_sqs(self, msg):
try:
response = self.boto3_client.send_message(
QueueUrl=self.queue_url,
MessageBody=json.dumps(msg)
)
# print(response)
# print(' -- Sent to SQS -- ')
except Exception as e:
print(e)
def send_to_redis_tasks(self, msg):
try:
channel_name = 'dependency_execute'
self.redis_client.publish(channel_name, json.dumps(msg))
            print(' -- Published to Redis tasks channel -- ')
except Exception as e:
print(e)
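# Hedged usage sketch (placeholder values only). Expired-key events only fire if the
# Redis server has keyspace notifications enabled for expired keys, e.g.
# `CONFIG SET notify-keyspace-events Ex`.
def _example_schedule_email():
    scheduler = RedisScheduler(host='localhost', port=6379)
    scheduler.set_sqs_keys('ACCESS_KEY', 'SECRET_KEY', 'email-queue')  # placeholders
    payload = json.dumps({'to': 'user@example.com', 'template': 'reminder'})
    scheduler.register_event(payload, '2030-01-01T09:00:00+00:00')
    scheduler.start_listening()  # blocks; expired emails_* keys are forwarded to SQS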
|
test_StandardCpuBenchmark.py
|
# -*- coding: utf-8 -*-
import threading
from pip_benchmark_python.standardbenchmarks.StandardCpuBenchmark import StandardCpuBenchmark
class TestStandardCpuBenchmark:
benchmark = None
def callback(self, arg=None):
print('Is Done!')
def setup_method(self):
self.benchmark = StandardCpuBenchmark()
self.benchmark.set_up(None)
def teardown_method(self):
self.benchmark.tear_down(self.callback)
def test_execute(self):
threads = []
active_threads = threading.active_count()
for i in range(100):
threads.append(threading.Thread(
target=lambda: self.benchmark.execute(None),
))
threads[-1].start()
for th in threads: # waiting for all threads
th.join()
assert active_threads == threading.active_count()
|
VideoUtil.py
|
from subprocess import call
import threading
player = 'D:/PotPlayer/PotPlayerMini64.exe'
def play(link):
    """
    Launch PotPlayer to play a video.
    :param link: video URL or local file path
    :return: None
    """
    # Play in a background thread so the caller is not blocked
    t = threading.Thread(target=_play_, name='play', args=(link,))
    t.start()
def _play_(link):
    call([player, link])
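# Hedged usage sketch: the URL below is a placeholder, and PotPlayer must exist at
# the `player` path above for the call to succeed.
if __name__ == '__main__':
    play('https://example.com/sample.mp4')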
|
main.py
|
#!/usr/bin/env python2
"""A simple terminal using Google spreadsheets as the menu. Meant for
casual RPG use.
"""
import curses
from linewindows import create_hline_window, create_vline_window
from main_terminal import MainTerminal
from time import sleep
from random import randint
from google_credentials import username, password
from multiprocessing import Manager, Process, Pipe
manager = Manager()
# Joe holds all the variables for the threads
joe = manager.Namespace()
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(1)
stdscr.nodelay(1)
curses.curs_set(0)
# max_ticker_length = window_size_x-3
import gspread
gc = gspread.login(username, password)
def get_news(news_ticker_input_pipe, gc, max_news_items=3):
"""A simple function to obtain the news string and update it. """
current_news_list = gc.open('rpg_news').worksheet('current_news').get_all_values()
tmp_list = []
for i in range(max_news_items+1):
# Take the first 3 news items and the title from the first column
try:
tmp_list.append(current_news_list[i][0])
except IndexError:
pass
news_ticker_input_pipe.send_bytes(" ".join(tmp_list) + " ")
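# Hedged, single-process miniature of the Pipe pattern used here: get_news (run in a
# worker Process) pushes bytes into the input end, while the GUI loop polls the
# output end without blocking.
def _example_pipe_roundtrip():
    output_end, input_end = Pipe()
    input_end.send_bytes(b"breaking news ")
    if output_end.poll():
        return output_end.recv_bytes()
    return None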
def gui_that_ticks(_joe):
"""Gui refreshing."""
news_ticker_output_pipe, news_ticker_input_pipe = Pipe()
menu_output_pipe, menu_input_pipe = Pipe()
get_news_process = None
# TODO: set up so that only one google connection is necessary
gc = gspread.login(username, password)
    # Setting up window dimensions
window_size_x = 80
window_size_y = 24
clock_w = 8
clock_h = 1
news_ticker_w = window_size_x - 2 - clock_w
news_ticker_h = 1
clock_y = 1
clock_x = news_ticker_w + 1
news_ticker_y = 1
news_ticker_x = 1
main_term_x = 2
main_term_y = 3
main_term_w = window_size_x - 2*main_term_x + 1
main_term_h = window_size_y - main_term_y - 1
clock = curses.newwin(clock_h, clock_w, clock_y, clock_x,)
news_ticker = curses.newwin(news_ticker_h, news_ticker_w,
news_ticker_y, news_ticker_x,)
main_term = MainTerminal(main_term_h, main_term_w, main_term_y,
main_term_x,)
loading_news = " "*news_ticker_w
for i in range(5):
loading_news += "Loading latest news..." + " "*news_ticker_w
_joe.current_news = loading_news
iter = 0
# global current_news
news = _joe.current_news
previous_news = _joe.current_news
latest_news = _joe.current_news
lside_border = create_vline_window(0, 0, window_size_y)
top_border = create_hline_window(0, 0, window_size_x)
middle_border = create_hline_window(2, 0, window_size_x)
bottom_border = create_hline_window(window_size_y-1, 0, window_size_x)
rside_border = create_vline_window(0, window_size_x-1, window_size_y)
curses.doupdate()
# Add # before the clock.
clock.addch("#")
# This string should be huge. But never wider than 76 characters
# main_term_string = ""
# main_term.addstr(0, 0, main_term_string)
visible_menu_dict = {}
# main_term.parse_menu()
while True:
c = stdscr.getch()
if c == ord('q'):
main_term.kill_menu_ss_process()
curses.nocbreak()
stdscr.keypad(0)
curses.echo()
curses.endwin()
break
elif c == ord('h'):
main_term.parse_menu()
for i in range(1,10):
if c == ord(str(i)):
try:
main_term.parse_menu(
wks_title=main_term.curr_menu_dict['option_' + str(i)]['action']
)
except KeyError:
pass
main_term.redraw()
lside_border.vline(0, 0, "#", window_size_y)
lside_border.noutrefresh()
for border in [top_border, middle_border, bottom_border]:
border.hline(0,0,"#",window_size_x)
border.noutrefresh()
rside_border.vline(0, 0, "#", window_size_y)
rside_border.noutrefresh()
# TODO change this to 10 or whatever
if iter % 1 == 0:
# time_hour = str(randint(10,99))
# time_minute = str(randint(10,99))
# clock.addstr(0, 2, time_hour + ":" + time_minute)
clock.addstr(0, 2, str(news_ticker_output_pipe.poll()))
clock.noutrefresh()
if iter % 1 == 0:
# News ticker action
news = news[1:] + news[0]
news_ticker.addstr(0,1,news[:news_ticker_w-2])
news_ticker.noutrefresh()
if iter % 100 == 0:
if get_news_process == None or not get_news_process.is_alive():
get_news_process = Process(
target=get_news,
args=(
news_ticker_input_pipe,
gc,
)
)
get_news_process.start()
# This should always happen after at least one get_news_process
# has started
if iter % 10 == 0:
if (not get_news_process.is_alive()
and news_ticker_output_pipe.poll()):
latest_news = news_ticker_output_pipe.recv_bytes()
# latest_news = " ".join(latest_news)
if latest_news != previous_news:
news = latest_news
previous_news = latest_news
# 10 000 iterations means about 15 minutes
if iter == 9999:
iter = 0
curses.doupdate()
iter += 1
# sleep(0.1)
curses.napms(100)
print "Loading ODIN software..."
# The main menu
print "Connecting to ODIN..."
print "Connected."
gui_process = Process(target=gui_that_ticks, args=(joe,))
gui_process.start()
gui_process.join()
curses.nocbreak()
stdscr.keypad(0)
curses.echo()
curses.endwin()
|
refactor.py
|
# Part of the awpa package: https://github.com/habnabit/awpa
# See LICENSE for copyright.
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
from __future__ import with_statement
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import sys
import logging
import operator
import collections
import io
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
for name in sorted(os.listdir(fixer_dir)):
if name.startswith("fix_") and name.endswith(".py"):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
return fix_names
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        # or a type and content -- so they don't get any farther.
        # Always return leaves.
if pat.type is None:
raise _EveryNode
return {pat.type}
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.values(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [pkg_name + "." + fix_name
for fix_name in get_all_fix_names(pkg_name, False)]
def _identity(obj):
return obj
if sys.version_info < (3, 0):
import codecs
_open_with_encoding = codecs.open
# codecs.open doesn't translate newlines sadly.
def _from_system_newlines(input):
return input.replace("\r\n", "\n")
def _to_system_newlines(input):
if os.linesep != "\n":
return input.replace("\n", os.linesep)
else:
return input
else:
_open_with_encoding = open
_from_system_newlines = _identity
_to_system_newlines = _identity
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT})
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
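# Hedged example of _detect_future_features on a small source string; both feature
# names are real __future__ features, the snippet itself is illustrative.
def _example_detect_features():
    src = (
        '"""module docstring"""\n'
        "from __future__ import (print_function, unicode_literals)\n"
    )
    # Expected result: frozenset({'print_function', 'unicode_literals'})
    return _detect_future_features(src)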
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {"print_function" : False,
"write_unchanged_files" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
options: a dict with configuration.
explicit: a list of fixers to run even if they are explicit.
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
if self.options["print_function"]:
self.grammar = pygram.python_grammar_no_print_statement
else:
self.grammar = pygram.python_grammar
# When this is True, the refactor*() methods will call write_file() for
# files processed even if they were not changed during refactoring. If
# and only if the refactor method's write parameter was True.
self.write_unchanged_files = self.options.get("write_unchanged_files")
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping optional fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if (not name.startswith(".") and
os.path.splitext(name)[1] == py_ext):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except OSError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += "\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if self.write_unchanged_files or output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
self.processed_file(str(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if self.write_unchanged_files or output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if self.write_unchanged_files or (tree and tree.was_changed):
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
#use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
#sort by depth; apply fixers from bottom(of the AST) to top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
#some fixers(eg fix_imports) must be applied
#with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except ValueError:
# this node has been cut off from a
# previous transformation ; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
if not fxr in match_set:
match_set[fxr]=[]
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored and there may be changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
if not self.write_unchanged_files:
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
f = _open_with_encoding(filename, "w", encoding=encoding)
except OSError as err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(_to_system_newlines(new_text))
except OSError as err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(keepends=True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + "\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(keepends=True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
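# --- Usage sketch (illustrative only, not part of the module) ----------------
# A minimal example of driving RefactoringTool by hand: build a tool with every
# fixer shipped in lib2to3.fixes and refactor a source string. The helper name
# `_example_refactor_string` is hypothetical and included purely for illustration.
def _example_refactor_string(source="print 'hello'\n"):
    # get_fixers_from_package() and RefactoringTool are defined earlier in this
    # module; refactor_string() returns the refactored parse tree.
    tool = RefactoringTool(get_fixers_from_package("lib2to3.fixes"))
    return str(tool.refactor_string(source, "<example>"))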
|
joy_multi_xbox360.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ##
# @brief [py example simple] motion basic test for doosan robot
# @author Kab Kyoum Kim (kabkyoum.kim@doosan.com)
import rospy
import os
import threading, time
import sys
from sensor_msgs.msg import Joy
sys.dont_write_bytecode = True
sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__),"../../../../common/imp")) ) # get import path : DSR_ROBOT.py
from DR_tcp_client import *
# for single robot
ROBOT_ID = "dsr01"
ROBOT_MODEL = "m1013"
import DR_init
DR_init.__dsr__id = ROBOT_ID
DR_init.__dsr__model = ROBOT_MODEL
from DSR_ROBOT import *
r = CDsrRobot(ROBOT_ID, ROBOT_MODEL)
m_stop_watch_time = 30 #sec
m_joyAnalogFlag = False
m_xyCompareFlag = False
m_joyButtonFlag = False
m_joyJogFlag = 0
m_joyJogVel = 0.0
g_sock = client_socket_open("192.168.137.2", 10004)
print("stop_watch server connect O.K!")
def shutdown():
print("shutdown time!")
print("shutdown time!")
print("shutdown time!")
pub_stop.publish(stop_mode=STOP_TYPE_QUICK)
return 0
def msgRobotState_cb(msg):
msgRobotState_cb.count += 1
if (0==(msgRobotState_cb.count % 100)):
rospy.loginfo("________ ROBOT STATUS ________")
print(" robot_state : %d" % (msg.robot_state))
print(" robot_state_str : %s" % (msg.robot_state_str))
print(" current_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posj[0],msg.current_posj[1],msg.current_posj[2],msg.current_posj[3],msg.current_posj[4],msg.current_posj[5]))
msgRobotState_cb.count = 0
def thread_subscriber():
rospy.Subscriber('/'+ROBOT_ID +ROBOT_MODEL+'/state', RobotState, msgRobotState_cb)
rospy.spin()
#rospy.spinner(2)
def thread_stop_watch():
print("thread_stop_watch running...")
while 1:
        res, rx_data = client_socket_read(g_sock)  # wait to receive data from the server
print("XXXXXXXXXXXXXXXXXXXXX")
print("XXXXXXXXXXXXXXXXXXXXX")
print("XXXXXXXXXXXXXXXXXXXXX")
print("res={0}, rx_data ={1}".format(res, rx_data))
        # decode bytes from the socket so the string comparisons below can match
        rev_str = rx_data.decode("utf-8") if isinstance(rx_data, bytes) else str(rx_data)
if rev_str == "#TIMEOUT":
print("Time of game is over!!!!")
elif rev_str == "#STOP":
print("The game is stopped!!!!")
else:
print("unknown data!!!")
def joy_cb(msg):
global m_joyAnalogFlag
global m_xyCompareFlag
global m_joyButtonFlag
global m_joyJogFlag
global m_joyJogVel
m_joyJogVel = 60
targetPos = [0, 0, 90, 0, 90, 0]
hommingPos = [0, 0, 0, 0, 0, 0]
jog_target = [0, 0, 0, 0, 0, 0]
for i in range(0,8):
#print("msg.buttons[{}] = {}".format(i,msg.buttons[i]) )
print("msg.axes[{}] = {}".format(i,msg.axes[i]) )
print("\n")
####
# go home
if msg.buttons[7] == 1 and msg.buttons[6] == 1:
r.movej(targetPos, 50, 50)
#----- START stop_watch ---------------------------------
client_socket_write(g_sock, b'#START')
#--------------------------------------------------------
elif msg.buttons[8] == 1:
#----- STOP stop_watch ----------------------------------
client_socket_write(g_sock, b'#STOP')
#--------------------------------------------------------
r.movej(hommingPos, 50, 50)
if msg.axes[4] != 0 or msg.axes[0] != 0 or msg.axes[1] != 0:
m_joyAnalogFlag = True
else:
m_joyAnalogFlag = False
    if msg.axes[1] != 0 or msg.axes[0] != 0:
if abs(msg.axes[1]) > abs(msg.axes[0]):
m_xyCompareFlag = False
else:
m_xyCompareFlag = True
if msg.axes[6] != 0 or msg.axes[7] != 0:
m_joyButtonFlag = True
else:
m_joyButtonFlag = False
if m_joyJogFlag == -1 and not m_joyAnalogFlag and m_joyButtonFlag:
print("1111111")
if msg.axes[6] == 1:
m_joyJogFlag = JOG_AXIS_TASK_Y
m_joyJogVel = -60
if msg.axes[6] == -1:
m_joyJogFlag = JOG_AXIS_TASK_Y
m_joyJogVel = 60
if msg.axes[7] == 1:
m_joyJogFlag = JOG_AXIS_TASK_X
m_joyJogVel = 60
if msg.axes[7] == -1:
m_joyJogFlag = JOG_AXIS_TASK_X
m_joyJogVel = -60
#r.jog(m_joyJogFlag, MOVE_REFERENCE_TOOL, m_joyJogVel)
r.jog_multi([1,1,0,0,0,0], MOVE_REFERENCE_BASE, m_joyJogVel)
#elif m_joyAnalogFlag and m_joyJogFlag == -1 and not m_joyButtonFlag:
elif m_joyAnalogFlag and not m_joyButtonFlag:
print("22222222")
if msg.axes[4] > 0:
#m_joyJogFlag = JOG_AXIS_TASK_Z
jog_target[2] = 1
if msg.axes[4] < 0:
#m_joyJogFlag = JOG_AXIS_TASK_Z
jog_target[2] = -1
m_xyCompareFlag = 0
if msg.axes[1] > 0 and m_xyCompareFlag == 0:
#m_joyJogFlag = JOG_AXIS_TASK_X
jog_target[0] = -1*msg.axes[1] #-1
if msg.axes[1] < 0 and m_xyCompareFlag == 0:
#m_joyJogFlag = JOG_AXIS_TASK_X
jog_target[0] = -1*msg.axes[1] #1
m_xyCompareFlag = 1
if msg.axes[0] > 0 and m_xyCompareFlag == 1:
#m_joyJogFlag = JOG_AXIS_TASK_Y
jog_target[1] = -1*msg.axes[0] #-1
if msg.axes[0] < 0 and m_xyCompareFlag == 1:
#m_joyJogFlag = JOG_AXIS_TASK_Y
jog_target[1] = -1*msg.axes[0] #1
print(">>>>>>>>>>>>>> jog_target = {}".format(jog_target))
#r.jog(m_joyJogFlag, MOVE_REFERENCE_TOOL, m_joyJogVel)
r.jog_multi(jog_target, MOVE_REFERENCE_BASE, m_joyJogVel)
else:
print("33333333")
if not m_joyAnalogFlag and not m_joyButtonFlag:
rospy.loginfo("jog stop")
#r.jog(m_joyJogFlag, MOVE_REFERENCE_TOOL, 0)
r.jog_multi([0,0,0,0,0,0], MOVE_REFERENCE_BASE, 0)
m_joyJogFlag = -1
if __name__ == "__main__":
rospy.init_node('joy_xbox360_py')
rospy.on_shutdown(shutdown)
t1 = threading.Thread(target=thread_stop_watch)
t1.daemon = True
t1.start()
pub_stop = rospy.Publisher('/'+ROBOT_ID +ROBOT_MODEL+'/stop', RobotStop, queue_size=10)
sub_joy = rospy.Subscriber("joy", Joy, joy_cb)
while not rospy.is_shutdown():
pass
client_socket_close(g_sock)
print('good bye!')
|
dispatch.py
|
"""
File : dispatch.py
Author : ian
Created : 04-21-2017
Last Modified By : ian
Last Modified On : 04-21-2017
***********************************************************************
The MIT License (MIT)
Copyright © 2017 Ian Cooper <ian_hammond_cooper@yahoo.co.uk>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
***********************************************************************
"""
import logging
import time
from enum import Enum
from multiprocessing import Event, Process
from threading import Thread
from typing import Callable, Dict
from brightside.channels import Channel
from brightside.command_processor import CommandProcessor, Request
from brightside.connection import Connection
from brightside.exceptions import ConfigurationException, MessagingException
from brightside.message_factory import create_quit_message
from brightside.message_pump import MessagePump
from brightside.messaging import BrightsideConsumerConfiguration, BrightsideConsumer, BrightsideMessage
class Performer:
def __init__(self,
channel_name: str,
connection: Connection,
consumer_configuration: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request],
logger: logging.Logger=None
) -> None:
"""
Each Performer abstracts a process running a message pump.
That process is forked from the parent, as we cannot guarantee a message pump is only I/O bound and thus will
not scale because of the GIL.
The Performer is how the supervisor (the dispatcher) tracks the workers it has created
The Performer needs:
:param channel_name: The name of the channel we want to create a sub-process for
:param connection: The connection to the broker
        :param consumer_factory: We need a user-supplied callback to provide us an instance of the consumer for
            the broker we are using. Arame? Something else?
        :param command_processor_factory: We need a user-supplied callback to create a command processor with
            subscribers, policies, outgoing task queues etc.
        :param mapper_func: We need a user-supplied callback to map on-the-wire messages to requests
"""
        # TODO: The parameter needs to be a connection, not an AramaConnection as we can't decide to create an Arame Consumer
# here. Where do we make that choice?
self._channel_name = channel_name
self._connection = connection
self._consumer_configuration = consumer_configuration
self._consumer_factory = consumer_factory
self._command_processor_factory = command_processor_factory
self._mapper_func = mapper_func
self._logger = logger or logging.getLogger(__name__)
def stop(self) -> None:
self._consumer_configuration.pipeline.put(create_quit_message())
def run(self, started_event: Event) -> Process:
p = Process(target=_sub_process_main, args=(
started_event,
self._channel_name,
self._connection,
self._consumer_configuration,
self._consumer_factory,
self._command_processor_factory,
self._mapper_func))
self._logger.debug("Starting worker process for channel: %s on exchange %s on server %s",
self._channel_name, self._connection.exchange, self._connection.amqp_uri)
p.start()
started_event.wait(timeout=1)
return p
def _sub_process_main(started_event: Event,
channel_name: str,
connection: Connection,
consumer_configuration: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request]) -> None:
"""
    This is the main method for the sub-process. Everything we need to create the message pump and
    channel must be passed in as parameters that can be pickled, since they will be serialized into this
    process when it runs. The data should be value types, not reference types, as we will receive a copy of the original.
    Inter-process communication is signalled by the event - to indicate startup - and by the pipeline, which
    carries a sentinel or stop message.
:param started_event: Used by the sub-process to signal that it is ready
:param channel_name: The name we want to give the channel to the broker for identification
:param connection: The 'broker' connection
:param consumer_configuration: How to configure our consumer of messages from the channel
:param consumer_factory: Callback to create the consumer. User code as we don't know what consumer library they
want to use. Arame? Something else?
:param command_processor_factory: Callback to register subscribers, policies, and task queues then build command
processor. User code that provides us with their requests and handlers
:param mapper_func: We need to map between messages on the wire and our handlers
:return:
"""
logger = logging.getLogger(__name__)
consumer = consumer_factory(connection, consumer_configuration, logger)
channel = Channel(name=channel_name, consumer=consumer, pipeline=consumer_configuration.pipeline)
    # TODO: Fix defaults that need to be passed in as config values
command_processor = command_processor_factory(channel_name)
message_pump = MessagePump(command_processor=command_processor, channel=channel, mapper_func=mapper_func,
timeout=500, unacceptable_message_limit=None, requeue_count=None)
logger.debug("Starting the message pump for %s", channel_name)
message_pump.run(started_event)
class ConsumerConfiguration:
def __init__(self,
connection: Connection,
consumer: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request]) -> None:
"""
        The configuration parameters for one consumer - we can create one or more performers from this, each of
        which is a message pump reading from a queue
        :param connection: The connection to the broker
        :param consumer: The consumer we want to create (routing key, queue etc)
        :param consumer_factory: A factory to create a consumer to read from a broker, for a given implementation i.e. arame
        :param command_processor_factory: Creates a command processor configured for a pipeline
        :param mapper_func: Maps between messages on the queue and requests (commands/events)
"""
self._connection = connection
self._consumer = consumer
self._consumer_factory = consumer_factory
self._command_processor_factory = command_processor_factory
self._mapper_func = mapper_func
@property
def connection(self) -> Connection:
return self._connection
@property
def brightside_configuration(self) -> BrightsideConsumerConfiguration:
return self._consumer
@property
def consumer_factory(self) -> Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer]:
return self._consumer_factory
@property
def command_processor_factory(self):
return self._command_processor_factory
@property
def mapper_func(self) -> Callable[[BrightsideMessage], Request]:
return self._mapper_func
class DispatcherState(Enum):
    ds_awaiting = 0
    ds_notready = 1
    ds_running = 2
    ds_stopped = 3
    ds_stopping = 4
class Dispatcher:
"""
    The dispatcher orchestrates the creation of consumers, where a consumer is the sub-process that runs a message pump
    to consume messages from a given channel and dispatch them to handlers. The dispatcher can start more than one
    performer for a given channel.
    The dispatcher also orchestrates the shutdown of consumers. It does this by posting a stop message into each running
    consumer's queue, thus allowing the current handler to run to completion but killing the consumer before it can
    consume another work item from the queue.
    As such the dispatcher tracks consumer instances.
    In addition, as we must pass a factory method to the sub-process that creates the command processor for that channel
    i.e. handler and policy registration, outgoing queues, the Dispatcher also acts as a registry of those factory methods
    for individual channels.
    The dispatcher uses a thread to 'stay running' until end is called. This means that receive is non-blocking. The
    supervisor thread yields regularly to avoid spinning the CPU, so there can be a delay between signalling to
    end and the shutdown beginning.
    Shutdown will finish work in progress, as it inserts a quit message in the queue that gets consumed 'next'.
"""
def __init__(self, consumers: Dict[str, ConsumerConfiguration]) -> None:
self._state = DispatcherState.ds_notready
self._consumers = consumers
self._performers = {k: Performer(
k,
v.connection,
v.brightside_configuration,
v.consumer_factory,
v.command_processor_factory,
v.mapper_func)
for k, v in self._consumers.items()}
self._running_performers = {}
self._supervisor = None
self._state = DispatcherState.ds_awaiting
@property
def state(self):
return self._state
def receive(self):
def _receive(dispatcher: Dispatcher, initialized: Event) -> None:
for k, v in self._performers.items():
event = Event()
dispatcher._running_performers[k] = v.run(event)
event.wait(3) # TODO: Do we want to configure this polling interval?
initialized.set()
while self._state == DispatcherState.ds_running:
time.sleep(5) # yield to avoid spinning, between checking for changes to state
if self._state == DispatcherState.ds_awaiting:
            initialized = Event()
            self._supervisor = Thread(target=_receive, args=(self, initialized))
            self._state = DispatcherState.ds_running
            self._supervisor.start()
            initialized.wait(5)  # TODO: Should this scale with the number of performers and be configurable?
def end(self):
if self._state == DispatcherState.ds_running:
for channel, process in list(self._running_performers.items()):
self._performers[channel].stop()
process.join(10) # TODO: We really want to make this configurable
self._state = DispatcherState.ds_stopping
self._supervisor.join(5)
self._running_performers.clear()
self._supervisor = None
self._state = DispatcherState.ds_stopped
        # Do we want to determine if any processes have failed to complete within the time frame?
def open(self, consumer_name: str) -> None:
# TODO: Build then refactor with receive
# Find the consumer
if consumer_name not in self._consumers:
raise ConfigurationException("The consumer {} could not be found, did you register it?".format(consumer_name))
consumer = self._consumers[consumer_name]
performer = Performer(consumer_name,
consumer.connection,
consumer.brightside_configuration,
consumer.consumer_factory,
consumer.command_processor_factory,
consumer.mapper_func)
self._performers[consumer_name] = performer
# if we have a supervisor thread
if self._state == DispatcherState.ds_running:
# start and add to items monitored by supervisor (running performers)
pass
# else
elif self._state == DispatcherState.ds_stopped:
# start the supervisor with the single consumer
self._state = DispatcherState.ds_awaiting
self.receive()
else:
            raise MessagingException("Dispatcher in an unrecognised state to open a new connection; state was {}".format(self._state))
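# --- Wiring sketch (illustrative only; not part of brightside) ----------------
# A minimal sketch of wiring one consumer into a Dispatcher, assuming the caller
# already has a Connection, a BrightsideConsumerConfiguration and the three
# user-supplied callbacks described above. All argument names are placeholders.
def _example_build_dispatcher(connection, consumer_configuration, consumer_factory,
                              command_processor_factory, mapper_func):
    # One ConsumerConfiguration per channel; receive() forks a Performer
    # (a sub-process message pump) per entry and returns without blocking.
    config = ConsumerConfiguration(connection,
                                   consumer_configuration,
                                   consumer_factory,
                                   command_processor_factory,
                                   mapper_func)
    dispatcher = Dispatcher(consumers={"example.channel": config})
    dispatcher.receive()  # supervisor thread keeps the pumps running
    return dispatcher     # call dispatcher.end() to drain and stop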
|
runserver.py
|
from django.core.management.commands.runserver import Command as BaseCommand
from atlassian_connect_django.addon import JiraAddon, ConfluenceAddon
import time, threading
class Command(BaseCommand):
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
def register_addon():
time.sleep(0.1)
jira_addon = JiraAddon()
confluence_addon = ConfluenceAddon()
jira_addon.register(port=self.port)
confluence_addon.register(port=self.port)
th = threading.Thread(target=register_addon, name='RegisterAddon')
th.start()
return handler
|
training.py
|
from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import multiprocessing
import threading
import six
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import objectives
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
def standardize_input_data(data, names, shapes=None,
check_batch_dim=True,
exception_prefix=''):
'''Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
'''
if type(data) is dict:
arrays = []
for name in names:
if name not in data:
raise Exception('No data provided for "' +
name + '". Need data for each key in: ' +
str(data.keys()))
arrays.append(data[name])
elif type(data) is list:
if len(data) != len(names):
if len(data) > 0 and hasattr(data[0], 'shape'):
raise Exception('Error when checking ' + exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) +
' arrays but instead got '
'the following list of ' + str(len(data)) +
' arrays: ' + str(data)[:200] +
'...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise Exception('Error when checking ' + exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' +
str(data)[:200])
arrays = data
else:
if not hasattr(data, 'shape'):
raise Exception('Error when checking ' + exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) != 1:
# case: model expects multiple inputs but only received
# a single Numpy array
raise Exception('The model expects ' + str(len(names)) +
' input arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# make arrays at least 2D
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# check shapes compatibility
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise Exception('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' +
str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_dim:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise Exception('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have shape ' + str(shapes[i]) +
' but got array with shape ' +
str(array.shape))
return arrays
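# A minimal sketch (illustrative only) of the normalization performed above: a
# dict keyed by input name is converted to an ordered list of arrays, and 1D
# arrays are expanded to at least 2D. The helper name is hypothetical.
def _example_standardize_input_data():
    x = np.array([1., 2., 3.])                      # shape (3,)
    arrays = standardize_input_data({'main_input': x}, ['main_input'])
    return arrays[0].shape                          # -> (3, 1)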
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
if x_weight is None or len(x_weight) == 0:
return [None for _ in output_names]
if len(output_names) == 1:
if type(x_weight) is list and len(x_weight) == 1:
return x_weight
if type(x_weight) is dict and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if type(x_weight) is list:
if len(x_weight) != len(output_names):
raise Exception('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) +
' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '`'
'array per model output.')
return x_weight
if type(x_weight) is dict:
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise Exception('The model has multiple outputs, so `' +
weight_type + '` '
                        'should be either a list or a dict. '
'Provided `' + weight_type +
'` type not understood: ' +
str(x_weight))
def standardize_class_weights(class_weight, output_names):
return standardize_sample_or_class_weights(class_weight,
output_names,
'class_weight')
def standardize_sample_weights(sample_weight, output_names):
return standardize_sample_or_class_weights(sample_weight,
output_names,
'sample_weight')
def check_array_lengths(X, Y, W):
x_lengths = [x.shape[0] for x in X]
y_lengths = [y.shape[0] for y in Y]
w_lengths = [w.shape[0] for w in W]
set_x = set(x_lengths)
if len(set_x) != 1:
raise Exception('All input arrays (x) should have '
'the same number of samples.')
set_y = set(y_lengths)
if len(set_y) != 1:
raise Exception('All target arrays (y) should have '
'the same number of samples.')
set_w = set(w_lengths)
if len(set_w) != 1:
raise Exception('All sample_weight arrays should have '
'the same number of samples.')
if list(set_x)[0] != list(set_y)[0]:
raise Exception('Input arrays should have '
'the same number of samples as target arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
str(list(set_y)[0]) + ' target samples.')
if list(set_x)[0] != list(set_w)[0]:
raise Exception('Sample_weight arrays should have '
'the same number of samples as input arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
                        str(list(set_w)[0]) + ' sample_weight samples.')
def check_loss_and_target_compatibility(targets, losses, output_shapes):
assert len(targets) == len(losses) == len(output_shapes)
    key_losses = {'mean_squared_error',
'binary_crossentropy',
'categorical_crossentropy'}
for y, loss, shape in zip(targets, losses, output_shapes):
if loss.__name__ == 'categorical_crossentropy':
if y.shape[1] == 1:
raise Exception('You are passing a target array of shape ' + str(y.shape) +
' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils.np_utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss.__name__ in key_losses and shape[1] is not None and y.shape[1] != shape[1]:
raise Exception('A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def collect_metrics(metrics, output_names):
if not metrics:
return [[] for _ in output_names]
if type(metrics) is list:
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif type(metrics) is dict:
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if type(output_metrics) is not list:
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise Exception('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' +
str(metrics))
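# A tiny sketch (illustrative only) of how metrics fan out across outputs; the
# helper name is hypothetical.
def _example_collect_metrics():
    # A flat list is applied to every output; a dict is looked up per output name.
    as_list = collect_metrics(['accuracy'], ['a', 'b'])       # [['accuracy'], ['accuracy']]
    as_dict = collect_metrics({'a': 'accuracy'}, ['a', 'b'])  # [['accuracy'], []]
    return as_list, as_dict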
def collect_trainable_weights(layer):
'''Collects all `trainable_weights` attributes,
    excluding any sublayers where `trainable` is set to `False`.
'''
trainable = getattr(layer, 'trainable', True)
if not trainable:
return []
weights = []
if layer.__class__.__name__ == 'Sequential':
for sublayer in layer.flattened_layers:
weights += collect_trainable_weights(sublayer)
elif layer.__class__.__name__ == 'Model':
for sublayer in layer.layers:
weights += collect_trainable_weights(sublayer)
elif layer.__class__.__name__ == 'Graph':
for sublayer in layer._graph_nodes.values():
weights += collect_trainable_weights(sublayer)
else:
weights += layer.trainable_weights
# dedupe weights
weights = list(set(weights))
    # TF variables auto-generate `name`, while Theano auto-generates `auto_name` (in Theano, `name` may be None)
if weights:
if K.backend() == 'theano':
weights.sort(key=lambda x: x.auto_name)
else:
weights.sort(key=lambda x: x.name)
return weights
def batch_shuffle(index_array, batch_size):
'''This shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
'''
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
def make_batches(size, batch_size):
'''Returns a list of batch indices (tuples of indices).
'''
nb_batch = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, nb_batch)]
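# A quick sketch (illustrative only) of the batch index tuples produced above;
# the helper below is hypothetical and exists purely for documentation.
def _example_make_batches():
    # 10 samples with batch_size=4 -> [(0, 4), (4, 8), (8, 10)]
    return make_batches(10, 4)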
def slice_X(X, start=None, stop=None):
'''This takes an array-like, or a list of
array-likes, and outputs:
- X[start:stop] if X is an array-like
    - [x[start:stop] for x in X] if X is a list
Can also work on list/array of indices: `slice_X(x, indices)`
# Arguments:
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
'''
if type(X) == list:
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [x[start] for x in X]
else:
return [x[start:stop] for x in X]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return X[start]
else:
return X[start:stop]
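# A short sketch (illustrative only) of the two slicing modes described above;
# the helper name is hypothetical.
def _example_slice_X():
    x = np.arange(10)
    head = slice_X(x, 0, 4)               # contiguous slice -> array([0, 1, 2, 3])
    picked = slice_X([x, x * 2], [1, 3])  # index list applied to every array in the list
    return head, picked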
def weighted_objective(fn):
'''Transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
'''
def weighted(y_true, y_pred, weights, mask=None):
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
# apply sample weighting
if weights is not None:
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
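# A numpy analogue (illustrative only, simplified to per-sample scalar losses)
# of the masking and weighting that `weighted` applies symbolically above:
# masked entries are zeroed and the mean is renormalized, then sample weights
# are applied and renormalized by the fraction of non-zero weights.
def _example_weighted_loss_numpy(per_sample_loss, weights=None, mask=None):
    score = np.asarray(per_sample_loss, dtype='float32')
    if mask is not None:
        m = np.asarray(mask, dtype='float32')
        score = score * m / np.mean(m)
    if weights is not None:
        w = np.asarray(weights, dtype='float32')
        score = score * w / np.mean((w != 0).astype('float32'))
    return float(np.mean(score))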
def standardize_weights(y, sample_weight=None, class_weight=None,
sample_weight_mode=None):
'''Performs weight input validation and standardization
to a single sample-wise (or timestep-wise) weight array.
'''
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
            raise Exception('sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise Exception('Found a sample_weight array for '
'an input with shape ' +
str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise Exception('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise Exception('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
assert len(sample_weight.shape) <= len(y.shape)
# TODO: proper error message
assert y.shape[:sample_weight.ndim] == sample_weight.shape
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise Exception('class_weight not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray([class_weight[cls] for cls in y_classes])
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
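# A small sketch (illustrative only) of the class_weight branch above: one-hot
# targets are mapped to per-sample weights via argmax. Helper name is hypothetical.
def _example_class_weights():
    y = np.array([[1, 0], [0, 1], [0, 1]])                       # classes 0, 1, 1
    return standardize_weights(y, class_weight={0: 1., 1: 5.})   # -> [1., 5., 5.]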
def generator_queue(generator, max_q_size=10,
wait_time=0.05, nb_worker=1, pickle_safe=False):
'''Builds a queue out of a data generator.
If pickle_safe, use a multiprocessing approach. Else, use threading.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
'''
generator_threads = []
if pickle_safe:
q = multiprocessing.Queue(maxsize=max_q_size)
_stop = multiprocessing.Event()
else:
q = queue.Queue()
_stop = threading.Event()
try:
def data_generator_task():
while not _stop.is_set():
try:
if pickle_safe or q.qsize() < max_q_size:
generator_output = next(generator)
q.put(generator_output)
else:
time.sleep(wait_time)
except Exception:
_stop.set()
raise
for i in range(nb_worker):
if pickle_safe:
# Reset random seed else all children processes share the same seed
np.random.seed()
thread = multiprocessing.Process(target=data_generator_task)
else:
thread = threading.Thread(target=data_generator_task)
generator_threads.append(thread)
thread.daemon = True
thread.start()
except:
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
q.close()
raise
return q, _stop, generator_threads
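# A minimal sketch (illustrative only) of consuming the queue built above with
# the default threading path; the generator and helper name are hypothetical.
def _example_generator_queue():
    def batches():
        while True:
            yield np.zeros((2, 3)), np.ones((2, 1))
    q, stop_event, workers = generator_queue(batches(), max_q_size=2, nb_worker=1)
    x_batch, y_batch = q.get()   # blocks until a worker thread enqueues a batch
    stop_event.set()             # ask the daemon worker(s) to exit their loop
    return x_batch.shape, y_batch.shape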
class Model(Container):
def compile(self, optimizer, loss, metrics=[], loss_weights=None,
sample_weight_mode=None, **kwargs):
'''Configures the model for training.
# Arguments
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [objectives](/objectives).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of objectives.
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to "temporal".
"None" defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
kwargs: when using the Theano backend, these arguments
are passed into K.function. Ignored for Tensorflow backend.
'''
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
# prepare loss weights
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif type(loss_weights) is dict:
for name in loss_weights:
if name not in self.output_names:
raise Exception('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif type(loss_weights) is list:
if len(loss_weights) != len(self.outputs):
raise Exception('When passing a list as loss_weights, '
                                'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise Exception('Could not interpret loss_weights argument: ' +
str(loss_weights))
# prepare loss functions
if type(loss) is dict:
for name in loss:
if name not in self.output_names:
raise Exception('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
raise Exception('Output "' + name +
'" missing from loss dictionary')
loss_functions.append(objectives.get(loss[name]))
elif type(loss) is list:
if len(loss) != len(self.outputs):
raise Exception('When passing a list as loss, '
                                'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [objectives.get(l) for l in loss]
else:
loss_function = objectives.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [weighted_objective(fn) for fn in loss_functions]
# prepare output masks
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if type(masks) is not list:
masks = [masks]
# prepare sample weights
if type(sample_weight_mode) is dict:
for name in sample_weight_mode:
if name not in self.output_names:
raise Exception('Unknown entry in '
'sample_weight_mode dictionary: "' +
name + '". '
'Only expected the following keys: ' +
str(self.output_names))
sample_weights = []
sample_weight_modes = []
for name in self.output_names:
if name not in sample_weight_mode:
raise Exception('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif type(sample_weight_mode) is list:
if len(sample_weight_mode) != len(self.outputs):
raise Exception('When passing a list as sample_weight_mode, ' +
                                'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed sample_weight_mode=' +
str(sample_weight_mode))
sample_weights = []
sample_weight_modes = []
for mode, name in zip(sample_weight_mode, self.output_names):
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
if sample_weight_mode == 'temporal':
sample_weights = [K.placeholder(ndim=2, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = ['temporal' for name in self.output_names]
else:
sample_weights = [K.placeholder(ndim=1, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = [None for name in self.output_names]
self.sample_weight_modes = sample_weight_modes
# prepare targets of model
self.targets = []
for i in range(len(self.outputs)):
shape = self.internal_output_shapes[i]
name = self.output_names[i]
self.targets.append(K.placeholder(ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i])))
# prepare metrics
self.metrics = metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# compute total loss
total_loss = None
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
output_loss = weighted_loss(y_true, y_pred,
sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
# add regularization penalties to the loss
for r in self.regularizers:
total_loss = r(total_loss)
# list of same size as output_names.
# contains tuples (metrics for output, names of metrics)
nested_metrics = collect_metrics(metrics, self.output_names)
def append_metric(layer_num, metric_name, metric_tensor):
"""Helper function, used in loop below"""
if len(self.output_names) > 1:
metric_name = self.output_layers[layer_num].name + '_' + metric_name
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_tensor)
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
output_metrics = nested_metrics[i]
for metric in output_metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy (because of class mode duality)
output_shape = self.internal_output_shapes[i]
acc_fn = None
if output_shape[-1] == 1 or self.loss_functions[i] == objectives.binary_crossentropy:
# case: binary accuracy
acc_fn = metrics_module.binary_accuracy
elif self.loss_functions[i] == objectives.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
acc_fn = metrics_module.sparse_categorical_accuracy
else:
acc_fn = metrics_module.categorical_accuracy
append_metric(i, 'acc', acc_fn(y_true, y_pred))
else:
metric_fn = metrics_module.get(metric)
metric_result = metric_fn(y_true, y_pred)
if not isinstance(metric_result, dict):
metric_result = {
metric_fn.__name__: metric_result
}
for name, tensor in six.iteritems(metric_result):
append_metric(i, name, tensor)
# prepare gradient updates and state updates
self.optimizer = optimizers.get(optimizer)
self.total_loss = total_loss
self.sample_weights = sample_weights
# functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
self._collected_trainable_weights = collect_trainable_weights(self)
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise Exception('You must compile your model before using it.')
if self.train_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
training_updates = self.optimizer.get_updates(self._collected_trainable_weights,
self.constraints,
self.total_loss)
updates = self.updates + training_updates
# returns loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=updates,
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise Exception('You must compile your model before using it.')
if self.test_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
# return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + [K.learning_phase()]
else:
inputs = self.inputs
# returns network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
**kwargs)
def _fit_loop(self, f, ins, out_labels=[], batch_size=32,
nb_epoch=100, verbose=1, callbacks=[],
val_f=None, val_ins=None, shuffle=True,
callback_metrics=[]):
'''Abstract fit function for f(ins).
Assume that f returns a list, labeled by out_labels.
# Arguments
f: Keras function returning a list of tensors
ins: list of tensors to be fed to `f`
out_labels: list of strings, display names of
the outputs of `f`
batch_size: integer batch size
nb_epoch: number of times to iterate over the data
verbose: verbosity mode, 0, 1 or 2
callbacks: list of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: list of tensors to be fed to `val_f`
shuffle: whether to shuffle the data at the beginning of each epoch
callback_metrics: list of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of list the display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
# Returns
`History` object.
'''
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
nb_train_sample = ins[0].shape[0]
index_array = np.arange(nb_train_sample)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'batch_size': batch_size,
'nb_epoch': nb_epoch,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
callback_model.stop_training = False
self.validation_data = val_ins
for epoch in range(nb_epoch):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(nb_train_sample, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
except TypeError:
raise Exception('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if batch_index == len(batches) - 1: # last batch
# validation
if do_validation:
# replace with self._evaluate
val_outs = self._test_loop(val_f, val_ins,
batch_size=batch_size,
verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
'''
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) != list:
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (nb_sample,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=K.floatx()))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
                    for batch_out in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self, x, y,
sample_weight=None, class_weight=None,
check_batch_dim=True, batch_size=None):
if not hasattr(self, 'optimizer'):
raise Exception('You must compile a model before training/testing.'
' Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self.internal_output_shapes, self.loss_functions):
if loss_fn.__name__ == 'sparse_categorical_crossentropy':
output_shapes.append(output_shape[:-1] + (1,))
elif getattr(objectives, loss_fn.__name__, None) is None:
output_shapes.append(None)
else:
output_shapes.append(output_shape)
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_dim=False,
exception_prefix='model input')
y = standardize_input_data(y, self.output_names,
output_shapes,
check_batch_dim=False,
exception_prefix='model target')
sample_weights = standardize_sample_weights(sample_weight,
self.output_names)
class_weights = standardize_class_weights(class_weight,
self.output_names)
sample_weights = [standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode)
in zip(y, sample_weights, class_weights, self.sample_weight_modes)]
check_array_lengths(x, y, sample_weights)
check_loss_and_target_compatibility(y, self.loss_functions, self.internal_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise Exception('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def fit(self, x, y, batch_size=32, nb_epoch=10, verbose=1, callbacks=[],
validation_split=0., validation_data=None, shuffle=True,
class_weight=None, sample_weight=None):
'''Trains the model for a fixed number of epochs (iterations on a dataset).
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
nb_epoch: integer, the number of times to iterate over the training data arrays.
verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = verbose, 2 = one log line per epoch.
callbacks: list of callbacks to be called during training.
See [callbacks](/callbacks).
validation_split: float between 0 and 1:
fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate the loss and any model metrics
on this data at the end of each epoch.
validation_data: data on which to evaluate the loss and any model metrics
at the end of each epoch. The model will not be trained on this data.
This could be a tuple (x_val, y_val) or a tuple (x_val, y_val, val_sample_weights).
shuffle: boolean, whether to shuffle the training data before each epoch.
class_weight: optional dictionary mapping class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
# Returns
A `History` instance. Its `history` attribute contains
all information collected during training.
'''
# validate user data
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_dim=False,
batch_size=batch_size)
# prepare validation data
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
                raise Exception('Invalid validation_data: expected a tuple (x_val, y_val) '
                                'or (x_val, y_val, val_sample_weights).')
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y,
sample_weight=val_sample_weight,
check_batch_dim=False,
batch_size=batch_size)
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_X(x, 0, split_at), slice_X(x, split_at))
y, val_y = (slice_X(y, 0, split_at), slice_X(y, split_at))
sample_weights, val_sample_weights = (
slice_X(sample_weights, 0, split_at), slice_X(sample_weights, split_at))
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
else:
do_validation = False
val_f = None
val_ins = None
# prepare input arrays and training function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# prepare display labels
out_labels = self.metrics_names
# rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows)
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
out_labels = deduped_out_labels
if do_validation:
callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
else:
callback_metrics = copy.copy(out_labels)
# delegate logic to _fit_loop
return self._fit_loop(f, ins, out_labels=out_labels,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins, shuffle=shuffle,
callback_metrics=callback_metrics)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
'''Returns the loss value and metrics values for the model
in test mode. Computation is done in batches.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
            batch_size: integer. Number of samples per evaluation batch.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
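        # Example
        A minimal sketch (assumes a compiled `model` with at least one metric,
        so a list is returned; `x_test`/`y_test` are illustrative names):
        ```python
            scores = model.evaluate(x_test, y_test, batch_size=32, verbose=0)
            for name, value in zip(model.metrics_names, scores):
                print(name, value)
        ```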
'''
# validate user data
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
check_batch_dim=False,
batch_size=batch_size)
# prepare inputs, delegate logic to _test_loop
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(f, ins,
batch_size=batch_size,
verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
'''Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array
                (or list of Numpy arrays if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
# Returns
A Numpy array of predictions.
'''
# validate user data
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_dim=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise Exception('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# prepare inputs, delegate logic to _predict_loop
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(f, ins,
batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y,
sample_weight=None, class_weight=None):
'''Runs a single gradient update on a single batch of data.
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
class_weight: optional dictionary mapping class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
# Returns
Scalar training loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
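        # Example
        A rough sketch of a manual training loop (assumes a compiled `model`;
        `next_batch()` is a hypothetical helper returning Numpy arrays):
        ```python
            for step in range(100):
                x_batch, y_batch = next_batch()
                loss = model.train_on_batch(x_batch, y_batch)
        ```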
'''
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_dim=True)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y, sample_weight=None):
'''Test the model on a single batch of samples.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
check_batch_dim=True)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
'''Returns predictions for a single batch of samples.
'''
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
verbose=1, callbacks=[],
validation_data=None, nb_val_samples=None,
class_weight={}, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Fits the model on data generated batch-by-batch by
a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
# Arguments
generator: a generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `samples_per_epoch`
samples have been seen by the model.
samples_per_epoch: integer, number of samples to process before
going to the next epoch.
nb_epoch: integer, total number of iterations on the data.
verbose: verbosity mode, 0, 1, or 2.
callbacks: list of callbacks to be called during training.
validation_data: this can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
nb_val_samples: only relevant if `validation_data` is a generator.
number of samples to use from validation generator
at the end of every epoch.
class_weight: dictionary mapping class indices to a weight
for the class.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up when using process based threading
pickle_safe: if True, use process based threading. Note that because
this implementation relies on multiprocessing, you should not pass
non picklable arguments to the generator as they can't be passed
easily to children processes.
# Returns
A `History` object.
# Example
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
samples_per_epoch=10000, nb_epoch=10)
```
'''
wait_time = 0.01 # in seconds
epoch = 0
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__'))
if val_gen and not nb_val_samples:
raise Exception('When using a generator for validation data, '
'you must specify a value for "nb_val_samples".')
out_labels = self.metrics_names
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'nb_epoch': nb_epoch,
'nb_sample': samples_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise Exception('validation_data should be a tuple '
'(val_x, val_y, val_sample_weight) '
'or (val_x, val_y). Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y, val_sample_weight)
self.validation_data = val_x + [val_y, val_sample_weights]
else:
self.validation_data = None
# start generator thread storing batches into a queue
data_gen_queue, _stop, generator_threads = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
callback_model.stop_training = False
while epoch < nb_epoch:
callbacks.on_epoch_begin(epoch)
samples_seen = 0
batch_index = 0
while samples_seen < samples_per_epoch:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
# build batch logs
batch_logs = {}
if type(x) is list:
batch_size = x[0].shape[0]
elif type(x) is dict:
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
try:
outs = self.train_on_batch(x, y,
sample_weight=sample_weight,
class_weight=class_weight)
except:
_stop.set()
raise
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# construct epoch logs
epoch_logs = {}
batch_index += 1
samples_seen += batch_size
# epoch finished
if samples_seen > samples_per_epoch:
warnings.warn('Epoch comprised more than '
'`samples_per_epoch` samples, '
'which might affect learning results. '
'Set `samples_per_epoch` correctly '
'to avoid this warning.')
if samples_seen >= samples_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(validation_data,
nb_val_samples,
max_q_size=max_q_size,
nb_worker=nb_worker,
pickle_safe=pickle_safe)
else:
# no need for try/except because
# data has already been validated
val_outs = self.evaluate(val_x, val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if type(val_outs) is not list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
data_gen_queue.close()
callbacks.on_train_end()
return self.history
def evaluate_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Evaluates the model on a data generator. The generator should
return the same kind of data as accepted by `test_on_batch`.
        # Arguments
generator:
generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
val_samples:
total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up when using process based threading
pickle_safe: if True, use process based threading. Note that because
this implementation relies on multiprocessing, you should not pass
non picklable arguments to the generator as they can't be passed
easily to children processes.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
self._make_test_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
weights = []
data_gen_queue, _stop, generator_threads = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
while processed_samples < val_samples:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
try:
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
except:
_stop.set()
raise
if type(x) is list:
nb_samples = len(x[0])
elif type(x) is dict:
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
all_outs.append(outs)
processed_samples += nb_samples
weights.append(nb_samples)
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
data_gen_queue.close()
if type(outs) is not list:
return np.average(np.asarray(all_outs),
weights=weights)
else:
averages = []
for i in range(len(outs)):
averages.append(np.average([out[i] for out in all_outs],
weights=weights))
return averages
def predict_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
# Arguments
generator: generator yielding batches of input samples.
val_samples: total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up when using process based threading
pickle_safe: if True, use process based threading. Note that because
this implementation relies on multiprocessing, you should not pass
non picklable arguments to the generator as they can't be passed
easily to children processes.
# Returns
Numpy array(s) of predictions.
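        # Example
        A minimal sketch (the generator below and `load_batch()` are
        illustrative assumptions, not part of this API):
        ```python
            def batch_generator():
                while 1:
                    yield load_batch()  # hypothetical helper returning a Numpy array
            preds = model.predict_generator(batch_generator(), val_samples=1000)
        ```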
'''
self._make_predict_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
data_gen_queue, _stop, generator_threads = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
while processed_samples < val_samples:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if isinstance(generator_output, tuple):
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
else:
x = generator_output
try:
outs = self.predict_on_batch(x)
except:
_stop.set()
raise
if type(x) is list:
nb_samples = len(x[0])
elif type(x) is dict:
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
if type(outs) != list:
outs = [outs]
if len(all_outs) == 0:
for out in outs:
shape = (val_samples,) + out.shape[1:]
all_outs.append(np.zeros(shape, dtype=K.floatx()))
for i, out in enumerate(outs):
all_outs[i][processed_samples:(processed_samples + nb_samples)] = out
processed_samples += nb_samples
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
data_gen_queue.close()
if len(all_outs) == 1:
return all_outs[0]
return all_outs
|
pika.py
|
import json
import logging
import os
import time
import typing
from collections import deque
from contextlib import contextmanager
from threading import Thread
from typing import (
Callable,
Deque,
Dict,
Optional,
Text,
Union,
Any,
List,
Tuple,
Generator,
)
from rasa.constants import (
DEFAULT_LOG_LEVEL_LIBRARIES,
ENV_LOG_LEVEL_LIBRARIES,
DOCS_URL_PIKA_EVENT_BROKER,
)
from rasa.core.brokers.broker import EventBroker
import rasa.shared.utils.io
from rasa.utils.endpoints import EndpointConfig
from rasa.utils.io import DEFAULT_ENCODING
if typing.TYPE_CHECKING:
from pika.adapters.blocking_connection import BlockingChannel
from pika import SelectConnection, BlockingConnection, BasicProperties
from pika.channel import Channel
import pika
from pika.connection import Parameters, Connection
logger = logging.getLogger(__name__)
RABBITMQ_EXCHANGE = "rasa-exchange"
DEFAULT_QUEUE_NAME = "rasa_core_events"
def initialise_pika_connection(
host: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: float = 5,
) -> "BlockingConnection":
"""Create a Pika `BlockingConnection`.
Args:
host: Pika host
username: username for authentication with Pika host
password: password for authentication with Pika host
port: port of the Pika host
        connection_attempts: number of connection attempts before giving up
        retry_delay_in_seconds: delay in seconds between connection attempts
Returns:
`pika.BlockingConnection` with provided parameters
"""
import pika
with _pika_log_level(logging.CRITICAL):
parameters = _get_pika_parameters(
host, username, password, port, connection_attempts, retry_delay_in_seconds
)
return pika.BlockingConnection(parameters)
@contextmanager
def _pika_log_level(temporary_log_level: int) -> Generator[None, None, None]:
"""Change the log level of the `pika` library.
The log level will remain unchanged if the current log level is 10 (`DEBUG`) or
lower.
Args:
temporary_log_level: Temporary log level for pika. Will be reverted to
previous log level when context manager exits.
"""
pika_logger = logging.getLogger("pika")
old_log_level = pika_logger.level
is_debug_mode = logging.root.level <= logging.DEBUG
if not is_debug_mode:
pika_logger.setLevel(temporary_log_level)
yield
pika_logger.setLevel(old_log_level)
def _get_pika_parameters(
host: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: float = 5,
) -> "Parameters":
"""Create Pika `Parameters`.
Args:
host: Pika host
username: username for authentication with Pika host
password: password for authentication with Pika host
port: port of the Pika host
        connection_attempts: number of connection attempts before giving up
        retry_delay_in_seconds: delay in seconds between connection attempts
Returns:
`pika.ConnectionParameters` which can be used to create a new connection to a
broker.
"""
import pika
if host.startswith("amqp"):
# user supplied an AMQP URL containing all the info
parameters = pika.URLParameters(host)
parameters.connection_attempts = connection_attempts
parameters.retry_delay = retry_delay_in_seconds
if username:
parameters.credentials = pika.PlainCredentials(username, password)
else:
# host seems to be just the host, so we use our parameters
parameters = pika.ConnectionParameters(
host,
port=port,
credentials=pika.PlainCredentials(username, password),
connection_attempts=connection_attempts,
# Wait between retries since
# it can take some time until
# RabbitMQ comes up.
retry_delay=retry_delay_in_seconds,
ssl_options=create_rabbitmq_ssl_options(host),
)
return parameters
def initialise_pika_select_connection(
parameters: "Parameters",
on_open_callback: Callable[["SelectConnection"], None],
on_open_error_callback: Callable[["SelectConnection", Text], None],
) -> "SelectConnection":
"""Create a non-blocking Pika `SelectConnection`.
Args:
parameters: Parameters which should be used to connect.
on_open_callback: Callback which is called when the connection was established.
on_open_error_callback: Callback which is called when connecting to the broker
failed.
Returns:
A callback-based connection to the RabbitMQ event broker.
"""
import pika
return pika.SelectConnection(
parameters,
on_open_callback=on_open_callback,
on_open_error_callback=on_open_error_callback,
)
def initialise_pika_channel(
host: Text,
queue: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: float = 5,
) -> "BlockingChannel":
"""Initialise a Pika channel with a durable queue.
Args:
host: Pika host.
queue: Pika queue to declare.
username: Username for authentication with Pika host.
password: Password for authentication with Pika host.
port: port of the Pika host.
        connection_attempts: Number of connection attempts before giving up.
        retry_delay_in_seconds: Delay in seconds between connection attempts.
Returns:
Pika `BlockingChannel` with declared queue.
"""
connection = initialise_pika_connection(
host, username, password, port, connection_attempts, retry_delay_in_seconds
)
return _declare_pika_channel_with_queue(connection, queue)
def _declare_pika_channel_with_queue(
connection: "BlockingConnection", queue: Text
) -> "BlockingChannel":
"""Declare a durable queue on Pika channel."""
channel = connection.channel()
channel.queue_declare(queue, durable=True)
return channel
def close_pika_channel(
channel: "Channel",
attempts: int = 1000,
time_between_attempts_in_seconds: float = 0.001,
) -> None:
"""Attempt to close Pika channel and wait until it is closed.
Args:
channel: Pika `Channel` to close.
attempts: How many times to try to confirm that the channel has indeed been
closed.
time_between_attempts_in_seconds: Wait time between attempts to confirm closed
state.
"""
from pika.exceptions import AMQPError
try:
channel.close()
logger.debug("Successfully initiated closing of Pika channel.")
except AMQPError:
logger.exception("Failed to initiate closing of Pika channel.")
while attempts:
if channel.is_closed:
logger.debug("Successfully closed Pika channel.")
return None
time.sleep(time_between_attempts_in_seconds)
attempts -= 1
logger.exception("Failed to close Pika channel.")
def close_pika_connection(connection: "Connection") -> None:
"""Attempt to close Pika connection."""
from pika.exceptions import AMQPError
try:
connection.close()
logger.debug("Successfully closed Pika connection with host.")
except AMQPError:
logger.exception("Failed to close Pika connection with host.")
class PikaEventBroker(EventBroker):
"""Pika-based event broker for publishing messages to RabbitMQ."""
def __init__(
self,
host: Text,
username: Text,
password: Text,
port: Union[int, Text] = 5672,
queues: Union[List[Text], Tuple[Text], Text, None] = None,
should_keep_unpublished_messages: bool = True,
raise_on_failure: bool = False,
log_level: Union[Text, int] = os.environ.get(
ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES
),
**kwargs: Any,
):
"""Initialise RabbitMQ event broker.
Args:
host: Pika host.
username: Username for authentication with Pika host.
password: Password for authentication with Pika host.
port: port of the Pika host.
queues: Pika queues to declare and publish to.
should_keep_unpublished_messages: Whether or not the event broker should
maintain a queue of unpublished messages to be published later in
case of errors.
raise_on_failure: Whether to raise an exception if publishing fails. If
`False`, keep retrying.
log_level: Logging level.
"""
logging.getLogger("pika").setLevel(log_level)
self.host = host
self.username = username
self.password = password
self.port = port
self.channel: Optional["Channel"] = None
self.queues = self._get_queues_from_args(queues)
self.should_keep_unpublished_messages = should_keep_unpublished_messages
self.raise_on_failure = raise_on_failure
# List to store unpublished messages which hopefully will be published later
self._unpublished_messages: Deque[Text] = deque()
self._run_pika()
def __del__(self) -> None:
if self.channel:
close_pika_channel(self.channel)
close_pika_connection(self.channel.connection)
def close(self) -> None:
"""Close the pika channel and connection."""
self.__del__()
@property
def rasa_environment(self) -> Optional[Text]:
"""Get value of the `RASA_ENVIRONMENT` environment variable."""
return os.environ.get("RASA_ENVIRONMENT")
@staticmethod
def _get_queues_from_args(
queues_arg: Union[List[Text], Tuple[Text], Text, None]
) -> Union[List[Text], Tuple[Text]]:
"""Get queues for this event broker.
The preferred argument defining the RabbitMQ queues the `PikaEventBroker` should
publish to is `queues` (as of Rasa Open Source version 1.8.2). This method
can be removed in the future, and `self.queues` should just receive the value of
the `queues` kwarg in the constructor.
Args:
queues_arg: Value of the supplied `queues` argument.
Returns:
Queues this event broker publishes to.
Raises:
`ValueError` if no valid `queues` argument was found.
"""
if queues_arg and isinstance(queues_arg, (list, tuple)):
return queues_arg
if queues_arg and isinstance(queues_arg, str):
logger.debug(
f"Found a string value under the `queues` key of the Pika event broker "
f"config. Please supply a list of queues under this key, even if it is "
f"just a single one. See {DOCS_URL_PIKA_EVENT_BROKER}"
)
return [queues_arg]
rasa.shared.utils.io.raise_warning(
f"No `queues` argument provided. It is suggested to "
f"explicitly specify a queue as described in "
f"{DOCS_URL_PIKA_EVENT_BROKER}. "
f"Using the default queue '{DEFAULT_QUEUE_NAME}' for now."
)
return [DEFAULT_QUEUE_NAME]
@classmethod
def from_endpoint_config(
cls, broker_config: Optional["EndpointConfig"]
) -> Optional["PikaEventBroker"]:
"""Initialise `PikaEventBroker` from `EndpointConfig`.
Args:
broker_config: `EndpointConfig` to read.
Returns:
`PikaEventBroker` if `broker_config` was supplied, else `None`.
"""
if broker_config is None:
return None
return cls(broker_config.url, **broker_config.kwargs)
def _run_pika(self) -> None:
parameters = _get_pika_parameters(
self.host, self.username, self.password, self.port
)
self._pika_connection = initialise_pika_select_connection(
parameters, self._on_open_connection, self._on_open_connection_error
)
# Run Pika io loop in extra thread so it's not blocking
self._run_pika_io_loop_in_thread()
def _on_open_connection(self, connection: "SelectConnection") -> None:
logger.debug(f"RabbitMQ connection to '{self.host}' was established.")
connection.channel(on_open_callback=self._on_channel_open)
def _on_open_connection_error(self, _, error: Text) -> None:
logger.warning(
f"Connecting to '{self.host}' failed with error '{error}'. Trying again."
)
def _on_channel_open(self, channel: "Channel") -> None:
logger.debug("RabbitMQ channel was opened. Declaring fanout exchange.")
# declare exchange of type 'fanout' in order to publish to multiple queues
# (https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchange-fanout)
channel.exchange_declare(RABBITMQ_EXCHANGE, exchange_type="fanout")
for queue in self.queues:
channel.queue_declare(queue=queue, durable=True)
channel.queue_bind(exchange=RABBITMQ_EXCHANGE, queue=queue)
self.channel = channel
while self._unpublished_messages:
# Send unpublished messages
message = self._unpublished_messages.popleft()
self._publish(message)
logger.debug(
f"Published message from queue of unpublished messages. "
f"Remaining unpublished messages: {len(self._unpublished_messages)}."
)
def _run_pika_io_loop_in_thread(self) -> None:
thread = Thread(target=self._run_pika_io_loop, daemon=True)
thread.start()
def _run_pika_io_loop(self) -> None:
# noinspection PyUnresolvedReferences
self._pika_connection.ioloop.start()
def is_ready(
self, attempts: int = 1000, wait_time_between_attempts_in_seconds: float = 0.01
) -> bool:
"""Spin until the pika channel is open.
It typically takes 50 ms or so for the pika channel to open. We'll wait up
to 10 seconds just in case.
Args:
attempts: Number of retries.
wait_time_between_attempts_in_seconds: Wait time between retries.
Returns:
`True` if the channel is available, `False` otherwise.
"""
while attempts:
if self.channel:
return True
time.sleep(wait_time_between_attempts_in_seconds)
attempts -= 1
return False
def publish(
self,
event: Dict[Text, Any],
retries: int = 60,
retry_delay_in_seconds: int = 5,
headers: Optional[Dict[Text, Text]] = None,
) -> None:
"""Publish `event` into Pika queue.
Args:
event: Serialised event to be published.
retries: Number of retries if publishing fails
retry_delay_in_seconds: Delay in seconds between retries.
headers: Message headers to append to the published message (key-value
dictionary). The headers can be retrieved in the consumer from the
`headers` attribute of the message's `BasicProperties`.
"""
body = json.dumps(event)
while retries:
try:
self._publish(body, headers)
return
except Exception as e:
logger.error(
f"Could not open Pika channel at host '{self.host}'. "
f"Failed with error: {e}"
)
self.channel = None
if self.raise_on_failure:
raise e
retries -= 1
time.sleep(retry_delay_in_seconds)
logger.error(f"Failed to publish Pika event on host '{self.host}':\n{body}")
def _get_message_properties(
self, headers: Optional[Dict[Text, Text]] = None
) -> "BasicProperties":
"""Create RabbitMQ message `BasicProperties`.
The `app_id` property is set to the value of `self.rasa_environment` if
present, and the message delivery mode is set to 2 (persistent). In
addition, the `headers` property is set if supplied.
Args:
headers: Message headers to add to the message properties of the
published message (key-value dictionary). The headers can be retrieved in
the consumer from the `headers` attribute of the message's
`BasicProperties`.
Returns:
`pika.spec.BasicProperties` with the `RASA_ENVIRONMENT` environment variable
as the properties' `app_id` value, `delivery_mode`=2 and `headers` as the
properties' headers.
"""
from pika.spec import BasicProperties
# make message persistent
kwargs = {"delivery_mode": 2}
if self.rasa_environment:
kwargs["app_id"] = self.rasa_environment
if headers:
kwargs["headers"] = headers
return BasicProperties(**kwargs)
def _basic_publish(
self, body: Text, headers: Optional[Dict[Text, Text]] = None
) -> None:
self.channel.basic_publish(
exchange=RABBITMQ_EXCHANGE,
routing_key="",
body=body.encode(DEFAULT_ENCODING),
properties=self._get_message_properties(headers),
)
logger.debug(
f"Published Pika events to exchange '{RABBITMQ_EXCHANGE}' on host "
f"'{self.host}':\n{body}"
)
def _publish(self, body: Text, headers: Optional[Dict[Text, Text]] = None) -> None:
if self._pika_connection.is_closed:
# Try to reset connection
self._run_pika()
self._basic_publish(body, headers)
elif not self.channel and self.should_keep_unpublished_messages:
logger.warning(
f"RabbitMQ channel has not been assigned. Adding message to "
f"list of unpublished messages and trying to publish them "
f"later. Current number of unpublished messages is "
f"{len(self._unpublished_messages)}."
)
self._unpublished_messages.append(body)
else:
self._basic_publish(body, headers)
def create_rabbitmq_ssl_options(
rabbitmq_host: Optional[Text] = None,
) -> Optional["pika.SSLOptions"]:
"""Create RabbitMQ SSL options.
Requires the following environment variables to be set:
RABBITMQ_SSL_CLIENT_CERTIFICATE - path to the SSL client certificate (required)
RABBITMQ_SSL_CLIENT_KEY - path to the SSL client key (required)
RABBITMQ_SSL_CA_FILE - path to the SSL CA file for verification (optional)
RABBITMQ_SSL_KEY_PASSWORD - SSL private key password (optional)
Details on how to enable RabbitMQ TLS support can be found here:
https://www.rabbitmq.com/ssl.html#enabling-tls
Args:
rabbitmq_host: RabbitMQ hostname
Returns:
Pika SSL context of type `pika.SSLOptions` if
the RABBITMQ_SSL_CLIENT_CERTIFICATE and RABBITMQ_SSL_CLIENT_KEY
environment variables are valid paths, else `None`.
"""
client_certificate_path = os.environ.get("RABBITMQ_SSL_CLIENT_CERTIFICATE")
client_key_path = os.environ.get("RABBITMQ_SSL_CLIENT_KEY")
if client_certificate_path and client_key_path:
import pika
import rasa.server
logger.debug(f"Configuring SSL context for RabbitMQ host '{rabbitmq_host}'.")
ca_file_path = os.environ.get("RABBITMQ_SSL_CA_FILE")
key_password = os.environ.get("RABBITMQ_SSL_KEY_PASSWORD")
ssl_context = rasa.server.create_ssl_context(
client_certificate_path, client_key_path, ca_file_path, key_password
)
return pika.SSLOptions(ssl_context, rabbitmq_host)
else:
return None
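# Usage sketch (illustrative only, not part of the original module): publish a
# single event to a RabbitMQ broker. The host name and credentials below are
# placeholder assumptions, and the snippet presumes a reachable broker.
if __name__ == "__main__":
    _broker = PikaEventBroker(
        host="localhost",
        username="guest",
        password="guest",
        queues=[DEFAULT_QUEUE_NAME],
    )
    if _broker.is_ready():
        _broker.publish({"event": "example", "timestamp": time.time()})
    _broker.close()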
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
import errno
import gc
import logging
import os
import os.path as osp
import re
import signal
import socket
import subprocess
import sys
import threading
import traceback
import importlib
logger = logging.getLogger(__name__)
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Workaround: importing rope.base.project here, otherwise this module can't
# be imported if Spyder was executed from a folder other than spyder's
#==============================================================================
try:
import rope.base.project # analysis:ignore
except ImportError:
pass
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QSplashScreen,
QStyleFactory, QTabWidget, QWidget)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# For issue 7447
try:
from qtpy.QtQuick import QQuickWindow, QSGRendererInterface
except Exception:
QQuickWindow = QSGRendererInterface = None
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.main import CONF
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication, MENU_SEPARATOR
from spyder.config.base import get_image_path
MAIN_APP = qapplication()
if PYQT5:
APP_ICON = QIcon(get_image_path("spyder.svg"))
else:
APP_ICON = QIcon(get_image_path("spyder.png"))
MAIN_APP.setWindowIcon(APP_ICON)
#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV, running_under_pytest
if not running_under_pytest():
SPLASH = QSplashScreen(QPixmap(get_image_path('splash.svg')))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
else:
SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, __trouble_url_short__, __website_url__,
get_versions)
from spyder.config.base import (get_conf_path, get_module_source_path, STDERR,
get_debug_level, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.app.cli_options import get_options
from spyder import dependencies
from spyder.py3compat import (is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable
from spyder.widgets.fileswitcher import FileSwitcher
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
from spyder.plugins.lspmanager import LSPManager
from spyder.config.gui import is_dark_font_color
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) import's of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri)
from spyder.config.gui import get_shortcut
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
#==============================================================================
# Third-party library imports
#==============================================================================
import qdarkstyle
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd_or_home()
#==============================================================================
# Utility functions
#==============================================================================
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
def set_opengl_implementation(option):
"""
Set the OpenGL implementation used by Spyder.
See issue 7447 for the details.
"""
if option == 'software':
QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software)
elif option == 'desktop':
QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
elif option == 'gles':
QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
def setup_logging(cli_options):
"""Setup logging with cli options defined by the user."""
if cli_options.debug_info or get_debug_level() > 0:
levels = {2: logging.INFO, 3: logging.DEBUG}
log_level = levels[get_debug_level()]
log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s'
if cli_options.debug_output == 'file':
log_file = 'spyder-debug.log'
else:
log_file = None
logging.basicConfig(level=log_level,
format=log_format,
filename=log_file,
filemode='w+')
# =============================================================================
# Dependencies
# =============================================================================
QDARKSTYLE_REQVER = '>=2.6.4'
dependencies.add("qdarkstyle", _("Dark style for the entire interface"),
required_version=QDARKSTYLE_REQVER)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
BOOKMARKS = (
('Python2', "https://docs.python.org/2/index.html",
_("Python2 documentation")),
('Python3', "https://docs.python.org/3/index.html",
_("Python3 documentation")),
('numpy', "https://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "https://matplotlib.org/contents.html",
_("Matplotlib documentation")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/",
_("PyQt5 Reference Guide")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/class_reference.html",
_("PyQt5 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
all_actions_defined = Signal()
sig_pythonpath_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # related to interactive tour
sig_moved = Signal("QMoveEvent") # related to interactive tour
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
self.open_project = options.project
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make Spyder quit when pressing Ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Loading Spyder path
self.path = []
self.not_active_path = []
self.project_path = []
if osp.isfile(self.SPYDER_PATH):
self.path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = [name for name in self.path if osp.isdir(name)]
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
self.not_active_path, _x = \
encoding.readlines(self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = \
[name for name in self.not_active_path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.ipyconsole = None
self.variableexplorer = None
self.plots = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
# File switcher
self.fileswitcher = None
        # Check for updates Thread and Worker, references needed to prevent
# segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.preferences.configdialog import (MainConfigPage,
ColorSchemeConfigPage)
from spyder.preferences.shortcuts import ShortcutsConfigPage
from spyder.preferences.runconfig import RunConfigPage
from spyder.preferences.maininterpreter import MainInterpreterConfigPage
self.general_prefs = [MainConfigPage, ShortcutsConfigPage,
ColorSchemeConfigPage, MainInterpreterConfigPage,
RunConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
# Quick Layouts and Dialogs
from spyder.preferences.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_interface_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.interface_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
        # The following flag remembers the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# The following flag is used to restore window's geometry when
# toggling out of fullscreen mode in Windows.
self.saved_normal_geometry = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See issue 4132
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# Apply preferences
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def setup(self):
"""Setup main window"""
logger.info("*** Start of MainWindow setup ***")
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
self.setStyleSheet(qdarkstyle.load_stylesheet_from_environment())
css_path = DARK_CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
self.setStyleSheet(
qdarkstyle.load_stylesheet_from_environment())
css_path = DARK_CSS_PATH
else:
css_path = CSS_PATH
else:
css_path = CSS_PATH
logger.info("Creating core actions...")
self.close_dockwidget_action = create_action(
self, icon=ima.icon('close_pane'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut
)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_interface_action = create_action(
self,
_("Lock panes and toolbars"),
toggled=self.toggle_lock,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_interface_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# File switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_fileswitcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_sc_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
namespace = None
logger.info("Creating toolbars...")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
self.projects_menu.aboutToShow.connect(self.valid_project)
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
logger.info("Creating Tools menu...")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.edit_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_sc_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.path_manager_callback,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
self.tools_menu_actions += [MENU_SEPARATOR, reset_spyder_action]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
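# Try the Qt4-suffixed program names first and fall back to the plain
# ones; the early break relies on create_program_action() returning a
# falsy value when the program is not installed.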
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"), name)
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
qtlact = create_program_action(self, _("Qt Linguist"), name)
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
for act in (qtdact, qtlact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
logger.info("Creating guidata and sift entries...")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see issue 2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
except Exception:
pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
except Exception:
pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_sc_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Internal console plugin
logger.info("Loading internal console...")
from spyder.plugins.console.plugin import Console
self.console = Console(self, namespace, exitfunc=self.closing,
profile=self.profile,
multithreaded=self.multithreaded,
message=_("Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"))
self.console.register_plugin()
# Language Server Protocol Client initialization
self.set_splash(_("Creating LSP Manager..."))
self.lspmanager = LSPManager(self)
# Working directory plugin
logger.info("Loading working directory...")
from spyder.plugins.workingdirectory.plugin import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory.toolbar)
# Help plugin
if CONF.get('help', 'enable'):
self.set_splash(_("Loading help..."))
from spyder.plugins.help.plugin import Help
self.help = Help(self, css_path=css_path)
self.help.register_plugin()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer.plugin import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor.plugin import Editor
self.editor = Editor(self)
self.editor.register_plugin()
# Start LSP client
self.set_splash(_("Launching LSP Client..."))
self.lspmanager.start_lsp_client('python')
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
self.file_menu_actions += [self.file_switcher_action,
self.symbol_finder_action, None,
restart_action, quit_action]
self.set_splash("")
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer.plugin import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
# Figure browser
self.set_splash(_("Loading figure browser..."))
from spyder.plugins.plots.plugin import Plots
self.plots = Plots(self)
self.plots.register_plugin()
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyder.plugins.history.plugin import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
self.ipyconsole = IPythonConsole(self, css_path=css_path)
self.ipyconsole.register_plugin()
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer.plugin import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
# Online help widget
try: # Qt >= v4.4
from spyder.plugins.onlinehelp.plugin import OnlineHelp
except ImportError: # Qt < v4.4
OnlineHelp = None # analysis:ignore
if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
self.set_splash(_("Loading online help..."))
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects.plugin import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles.plugin import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
# Load other plugins (former external plugins)
# TODO: Use this loop to load all internal plugins and remove
# duplicated code
other_plugins = ['breakpoints', 'profiler', 'pylint']
for plugin_name in other_plugins:
if CONF.get(plugin_name, 'enable'):
module = importlib.import_module(
'spyder.plugins.{}'.format(plugin_name))
plugin = module.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Third-party plugins
self.set_splash(_("Loading third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Setting up main window..."))
# Help menu
trouble_action = create_action(self,
_("Troubleshooting..."),
triggered=self.trouble_guide)
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
spyder_doc = 'https://docs.spyder-ide.org/'
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(spyder_doc))
self.register_shortcut(doc_action, "_",
"spyder documentation")
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
shortcuts_action = create_action(self, _("Shortcuts Summary"),
shortcut="Meta+F1",
triggered=self.show_shortcuts_dialog)
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"), self)
self.tour_menu_actions = []
# TODO: Only show the intro tour for now. When we are close to
# finishing 3.0, we will finish and show the other tours
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
self.help_menu_actions = [doc_action, tut_action, shortcuts_action,
self.tours_menu,
MENU_SEPARATOR, trouble_action,
report_action, dep_action,
self.check_updates_action, support_action,
MENU_SEPARATOR]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
path = file_uri(path)
if not API == 'pyside':
slot=lambda _checked, path=path: programs.start_file(path)
else:
slot=lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
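# Scan sys.prefix/Doc (shipped by some Windows Python distributions) for
# *.chm/*.pdf manuals and expose each one, except Python's own, as a
# help-menu entry named after the package prefix captured by the regex.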
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z_]*)(doc)?(-dev)?(-ref)?(-user)?\.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"), self)
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
webres_actions.insert(8, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# ----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_interface_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
logger.info("Setting up window...")
self.setup_layout(default=False)
# Show and hide shortcuts in menus for Mac.
# This is a workaround because we can't disable shortcuts
# by setting context=Qt.WidgetShortcut there
if sys.platform == 'darwin':
for name in ['file', 'edit', 'search', 'source', 'run', 'debug',
'projects', 'tools', 'plugins']:
menu_object = getattr(self, name + '_menu')
menu_object.aboutToShow.connect(
lambda name=name: self.show_shortcuts(name))
menu_object.aboutToHide.connect(
lambda name=name: self.hide_shortcuts(name))
if self.splash is not None:
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
self.restore_scrollbar_position.emit()
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# In Mac OS X 10.7 our app is not displayed after being initialized (I
# don't know why, because this doesn't happen when it's started from
# the terminal), so we need to resort to this hack to make it appear.
if running_in_mac_app():
idx = __file__.index(MAC_APP_NAME)
app_path = __file__[:idx]
subprocess.call(['open', app_path + MAC_APP_NAME])
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
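# Daemon thread: it must never block Spyder from exiting.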
t.daemon = True
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
self.create_toolbars_menu()
# Update toolbar visibility status
self.toolbars_visible = CONF.get('main', 'toolbars_visible')
self.load_last_visible_toolbars()
# Update lock status
self.lock_interface_action.setChecked(self.interface_locked)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole.isvisible:
self.historylog.add_history(get_conf_path('history.py'))
if self.open_project:
self.projects.open_project(self.open_project)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files()
# Check for spyder updates
if DEV is None and CONF.get('main', 'check_updates_on_startup'):
self.give_updates_feedback = False
self.check_updates(startup=True)
# Show dialog with missing dependencies
self.report_missing_dependencies()
# Raise the menuBar to the top of the main window widget's stack
# (Fixes issue 3887)
self.menuBar().raise_()
self.is_setting_up = False
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
missing_deps = dependencies.missing_dependencies()
if missing_deps:
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
# It's necessary to verify that the saved position is valid for the
# current screen. See issue 3748
pos_x = pos[0]
pos_y = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < pos_x or current_height < pos_y:
pos = CONF.get_default(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
def get_window_settings(self):
"""Return current window settings
Symmetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
Symmetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
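# Clear any maximized/fullscreen state before applying the saved
# geometry; the dock arrangement itself is restored below from the
# hex-encoded QMainWindow.saveState() blob.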
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
self.restoreState(QByteArray().fromHex(str(hexstate).encode('utf-8')))
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
none_state=False):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
if none_state:
CONF.set(section, prefix + 'state', None)
else:
qba = self.saveState()
CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
# Order and name of the default layouts is found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
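# The defaults are saved with none_state=True, so only geometry is
# stored and the dock 'state' stays empty; quick_layout_switch() then
# rebuilds them through setup_default_layouts() instead of restoring a
# saved hexstate.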
for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section,
none_state=True)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section, none_state=True)
self.current_quick_layout = 'default'
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
for plugin in (self.widgetlist + self.thirdparty_plugins):
try:
plugin.initialize_plugin_in_mainwindow_layout()
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time"""
self.maximize_dockwidget(restore=True)
self.set_window_settings(*settings)
self.setUpdatesEnabled(False)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
# define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
plots = self.plots
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
# Layouts are organized by columns; each column is organized by rows.
# Column widths have to add up to 1.0, and the row heights within each
# column have to add up to 1.0.
# Spyder Default Initial Layout
s_layout = {'widgets': [
# column 0
[[explorer_project]],
# column 1
[[editor]],
# column 2
[[outline]],
# column 3
[[help_plugin, explorer_variable, plots, helper,
explorer_file, finder] + plugins,
[console_int, console_ipy, history]]
],
'width fraction': [0.0, # column 0 width
0.55, # column 1 width
0.0, # column 2 width
0.45], # column 3 width
'height fraction': [[1.0], # column 0, row heights
[1.0], # column 1, row heights
[1.0], # column 2, row heights
[0.46, 0.54]], # column 3, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
r_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_int]],
# column 1
[[explorer_variable, plots, history, outline,
finder] + plugins,
[explorer_file, explorer_project, help_plugin, helper]]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Matlab
m_layout = {'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, plots, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [0.20, # column 0 width
0.40, # column 1 width
0.40], # column 2 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45], # column 1, row heights
[0.55, 0.45]], # column 2, row heights
'hidden widgets': [],
'hidden toolbars': [],
}
# Vertically split
v_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [1.0], # column 0 width
'height fraction': [[0.55, 0.45]], # column 0, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Horizontally split
h_layout = {'widgets': [
# column 0
[[editor]],
# column 1
[[console_ipy, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[1.0], # column 0, row heights
[1.0]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': []
}
# Layout selection
layouts = {'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout}
layout = layouts[index]
widgets_layout = layout['widgets']
widgets = []
for column in widgets_layout:
for row in column:
for widget in row:
if widget is not None:
widgets.append(widget)
# Make every widget visible
for widget in widgets:
widget.toggle_view(True)
action = widget.toggle_view_action
action.setChecked(widget.dockwidget.isVisible())
# Set the widgets horizontally
for i in range(len(widgets) - 1):
first, second = widgets[i], widgets[i+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
Qt.Horizontal)
# Arrange rows vertically
for column in widgets_layout:
for i in range(len(column) - 1):
first_row, second_row = column[i], column[i+1]
if first_row is not None and second_row is not None:
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
for column in widgets_layout:
for row in column:
for i in range(len(row) - 1):
first, second = row[i], row[i+1]
if first is not None and second is not None:
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = global_hidden_widgets + layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
# set the width and height
self._layout_widget_info = []
width, height = self.window_size.width(), self.window_size.height()
# fix column width
# for c in range(len(widgets_layout)):
# widget = widgets_layout[c][0][0].dockwidget
# min_width, max_width = widget.minimumWidth(), widget.maximumWidth()
# info = {'widget': widget,
# 'min width': min_width,
# 'max width': max_width}
# self._layout_widget_info.append(info)
# new_width = int(layout['width fraction'][c] * width * 0.95)
# widget.setMinimumWidth(new_width)
# widget.setMaximumWidth(new_width)
# widget.updateGeometry()
# fix column height
for c, column in enumerate(widgets_layout):
for r in range(len(column) - 1):
widget = column[r][0]
dockwidget = widget.dockwidget
dock_min_h = dockwidget.minimumHeight()
dock_max_h = dockwidget.maximumHeight()
info = {'widget': widget,
'dock min height': dock_min_h,
'dock max height': dock_max_h}
self._layout_widget_info.append(info)
# The 0.95 factor adjusts the height to the estimated useful
# area of the window
new_height = int(layout['height fraction'][c][r]*height*0.95)
dockwidget.setMinimumHeight(new_height)
dockwidget.setMaximumHeight(new_height)
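# Pinning min == max height forces Qt to honour the requested row
# fractions; the single-shot timer below calls layout_fix_timer() to
# restore the original size constraints once the layout has settled.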
self._custom_layout_timer = QTimer(self)
self._custom_layout_timer.timeout.connect(self.layout_fix_timer)
self._custom_layout_timer.setSingleShot(True)
self._custom_layout_timer.start(5000)
def layout_fix_timer(self):
"""Fixes the height of docks after a new layout is set."""
info = self._layout_widget_info
for i in info:
dockwidget = i['widget'].dockwidget
if 'dock min width' in i:
dockwidget.setMinimumWidth(i['dock min width'])
dockwidget.setMaximumWidth(i['dock max width'])
if 'dock min height' in i:
dockwidget.setMinimumHeight(i['dock min height'])
dockwidget.setMaximumHeight(i['dock max height'])
dockwidget.updateGeometry()
self.setUpdatesEnabled(True)
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
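# layout_index mixes the string 'default' with the integer indices of
# the active custom layouts; cycling below wraps around with modular
# arithmetic.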
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
def quick_layout_set_menu(self):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
ql_actions = [create_action(self, _('Spyder Default Layout'),
triggered=lambda:
self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
get = CONF.get
set_ = CONF.set
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(
    self, _("Warning"),
    _("Layout <b>%s</b> will be overwritten. "
      "Do you want to continue?") % name,
    QMessageBox.Yes | QMessageBox.No) == QMessageBox.Yes
index = order.index(name)
else:
answer = True
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
# Always make active a new layout even if it overwrites an inactive
# layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
set_('quick_layouts', 'names', names)
set_('quick_layouts', 'order', order)
set_('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
# The default layouts will always be regenerated unless there was
# an overwrite, either by rewriting with the same name, or by deleting
# and then creating a new one
if hexstate is None:
# The value for hexstate shouldn't be None for a custom saved
# layout (i.e., where the index is greater than the number of
# defaults). See issue 6202.
if index != 'default' and index >= self.DEFAULT_LAYOUTS:
QMessageBox.critical(
self, _("Warning"),
_("Error opening the custom layout. Please close"
" Spyder and try again. If the issue persists,"
" then you must use 'Reset to Spyder default' "
"from the layout menu."))
return
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
# TODO: is there any real use in calling the previous layout
# setting?
# self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(element._shown_shortcut)
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(QKeySequence())
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if not console and not_readonly and not self.editor.is_file_opened():
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled(readwrite_editor
                            and widget.document().isUndoAvailable())
self.redo_action.setEnabled(readwrite_editor
                            and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'ipython_console', 'variable_explorer',
'help', 'plots', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console', None]
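# Each loaded plugin's toggle action replaces its name in `order`;
# entries that are still plain strings afterwards (disabled or missing
# plugins) are filtered out below before building the menu.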
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if isinstance(action, str):
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
if plugin.isAncestorOf(self.last_focused_widget):
plugin.visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
for plugin in (self.widgetlist + self.thirdparty_plugins):
plugin.close_window()
if not plugin.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.lspmanager.closing_plugin(cancelable)
self.already_closed = True
return True
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
if plugin.isAncestorOf(widget):
plugin.toggle_view_action.setChecked(False)
break
def toggle_lock(self, value):
"""Lock/Unlock dockwidgets and toolbars"""
self.interface_locked = value
CONF.set('main', 'panes_locked', value)
# Apply lock to panes
for plugin in (self.widgetlist + self.thirdparty_plugins):
if self.interface_locked:
if plugin.dockwidget.isFloating():
plugin.dockwidget.setFloating(False)
plugin.dockwidget.setTitleBarWidget(QWidget())
else:
plugin.dockwidget.set_title_bar()
# Apply lock to toolbars
for toolbar in self.toolbarslist:
if self.interface_locked:
toolbar.setMovable(False)
else:
toolbar.setMovable(True)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# Select plugin to maximize
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in (self.widgetlist + self.thirdparty_plugins):
plugin.dockwidget.hide()
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
# Only plugins that have a dockwidget are part of widgetlist,
# so last_plugin can be None after the above "for" cycle.
# For example, this happens if, after Spyder has started, focus
# is set to the Working directory toolbar (which doesn't have
# a dockwidget) and then you press the Maximize button
if self.last_plugin is None:
# Using the Editor as default plugin to maximize
self.last_plugin = self.editor
# Maximize last_plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
self.setCentralWidget(self.last_plugin)
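# The maximized plugin temporarily becomes the central widget; the
# dock state saved above is restored when maximizing is toggled off.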
self.last_plugin.ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
self.last_plugin.show()
self.last_plugin.visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
self.last_plugin.ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.fullscreen_flag:
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.fullscreen_flag:
self.fullscreen_flag = False
if os.name == 'nt':
self.setWindowFlags(
self.windowFlags()
^ (Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint))
self.setGeometry(self.saved_normal_geometry)
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.saved_normal_geometry = self.normalGeometry()
if os.name == 'nt':
# Due to limitations of the Windows DWM, compositing is not
# handled correctly for OpenGL based windows when going into
# full screen mode, so we need to use this workaround.
# See Issue #4291.
self.setWindowFlags(self.windowFlags()
| Qt.FramelessWindowHint
| Qt.WindowStaysOnTopHint)
r = QApplication.desktop().screenGeometry()
self.setGeometry(
r.left() - 1, r.top() - 1, r.width() + 2, r.height() + 2)
self.showNormal()
else:
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def about(self):
"""Create About Spyder dialog with general information."""
versions = get_versions()
# Show Git revision for development version
revlink = ''
if versions['revision']:
rev = versions['revision']
revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
"commit/%s'>Commit: %s</a>)" % (rev, rev)
msgBox = QMessageBox(self)
msgBox.setText(
"""
<b>Spyder {spyder_ver}</b> {revision}
<br>The Scientific Python Development Environment |
<a href="{website_url}">Spyder-IDE.org</a>
<br>Copyright © 2009-2018 Spyder Project Contributors
<br>Distributed under the terms of the
<a href="{github_url}/blob/master/LICENSE.txt">MIT License</a>.
<p>Created by Pierre Raybaut; current maintainer is Carlos Cordoba.
<br>Developed by the
<a href="{github_url}/graphs/contributors">international
Spyder community</a>.
<br>Many thanks to all the Spyder beta testers and dedicated users.
<p>For help with Spyder errors and crashes, please read our
<a href="{trouble_url}">Troubleshooting Guide</a>, and for bug
reports and feature requests, visit our
<a href="{github_url}">Github site</a>.
For project discussion, see our
<a href="{forum_url}">Google Group</a>.
<p>This project is part of a larger effort to promote and
facilitate the use of Python for scientific and engineering
software development.
The popular Python distributions
<a href="https://www.anaconda.com/download/">Anaconda</a> and
<a href="https://winpython.github.io/">WinPython</a>
also contribute to this plan.
<p>Python {python_ver} {bitness}-bit | Qt {qt_ver} |
{qt_api} {qt_api_ver} | {os_name} {os_ver}
<small><p>Certain source files under other compatible permissive
licenses and/or originally by other authors.
Spyder 3 theme icons derived from
<a href="https://fontawesome.com/">Font Awesome</a> 4.7
(© 2016 David Gandy; SIL OFL 1.1) and
<a href="http://materialdesignicons.com/">Material Design</a>
(© 2014 Austin Andrews; SIL OFL 1.1).
Most Spyder 2 theme icons sourced from the
<a href="https://www.everaldo.com">Crystal Project iconset</a>
(© 2006-2007 Everaldo Coelho; LGPL 2.1+).
Other icons from
<a href="http://p.yusukekamiyamane.com/">Yusuke Kamiyamane</a>
(© 2013 Yusuke Kamiyamane; CC-BY 3.0),
the <a href="http://www.famfamfam.com/lab/icons/silk/">FamFamFam
Silk icon set</a> 1.3 (© 2006 Mark James; CC-BY 2.5), and
the <a href="https://www.kde.org/">KDE Oxygen icons</a>
(© 2007 KDE Artists; LGPL 3.0+).</small>
<p>See the <a href="{github_url}/blob/master/NOTICE.txt">NOTICE</a>
file for full legal information.
"""
.format(spyder_ver=versions['spyder'],
revision=revlink,
website_url=__website_url__,
github_url=__project_url__,
trouble_url=__trouble_url__,
forum_url=__forum_url__,
python_ver=versions['python'],
bitness=versions['bitness'],
qt_ver=versions['qt'],
qt_api=versions['qt_api'],
qt_api_ver=versions['qt_api_ver'],
os_name=versions['system'],
os_ver=versions['release'])
)
msgBox.setWindowTitle(_("About %s") % "Spyder")
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.setIconPixmap(APP_ICON.pixmap(QSize(64, 64)))
msgBox.setTextInteractionFlags(
Qt.LinksAccessibleByMouse | Qt.TextSelectableByMouse)
msgBox.exec_()
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(self)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.exec_()
def render_issue(self, description='', traceback=''):
"""Render issue before sending it to Github"""
# Get component versions
versions = get_versions()
# Get git revision for development version
revision = ''
if versions['revision']:
revision = versions['revision']
# Make a description header in case no description is supplied
if not description:
description = "### What steps reproduce the problem?"
# Make error section from traceback and add appropriate reminder header
if traceback:
error_section = ("### Traceback\n"
"```python-traceback\n"
"{}\n"
"```".format(traceback))
else:
error_section = ''
issue_template = """\
## Description
{description}
{error_section}
## Versions
* Spyder version: {spyder_version} {commit}
* Python version: {python_version}
* Qt version: {qt_version}
* {qt_api_name} version: {qt_api_version}
* Operating System: {os_name} {os_version}
### Dependencies
```
{dependencies}
```
""".format(description=description,
error_section=error_section,
spyder_version=versions['spyder'],
commit=revision,
python_version=versions['python'],
qt_version=versions['qt'],
qt_api_name=versions['qt_api'],
qt_api_version=versions['qt_api_ver'],
os_name=versions['system'],
os_version=versions['release'],
dependencies=dependencies.status())
return issue_template
@Slot()
def report_issue(self, body=None, title=None, open_webpage=False):
"""Report a Spyder issue to github, generating body text if needed."""
if body is None:
from spyder.widgets.reporterror import SpyderErrorDialog
report_dlg = SpyderErrorDialog(self, is_report=True)
report_dlg.show()
else:
if open_webpage:
if PY3:
from urllib.parse import quote
else:
from urllib import quote # analysis:ignore
from qtpy.QtCore import QUrlQuery
url = QUrl(__project_url__ + '/issues/new')
query = QUrlQuery()
query.addQueryItem("body", quote(body))
if title:
query.addQueryItem("title", quote(title))
url.setQuery(query)
QDesktopServices.openUrl(url)
@Slot()
def trouble_guide(self):
"""Open Spyder troubleshooting guide in a web browser."""
url = QUrl(__trouble_url__)
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
"""Open Spyder Google Group in a web browser."""
url = QUrl(__forum_url__)
QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in the IPython console and, if requested, switch
focus back to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.isfile(fname):
self.open_file(fname, external=True)
elif osp.isfile(osp.join(CWD, fname)):
self.open_file(osp.join(CWD, fname), external=True)
# ---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
"""Return Spyder PYTHONPATH"""
active_path = [p for p in self.path if p not in self.not_active_path]
return active_path + self.project_path
def add_path_to_sys_path(self):
"""Add Spyder path to sys.path"""
for path in reversed(self.get_spyder_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Spyder path from sys.path"""
for path in self.path + self.project_path:
while path in sys.path:
sys.path.remove(path)
@Slot()
def path_manager_callback(self):
"""Spyder path manager"""
from spyder.widgets.pathmanager import PathManager
self.remove_path_from_sys_path()
project_path = self.projects.get_pythonpath()
dialog = PathManager(self, self.path, project_path,
self.not_active_path, sync=True)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.exec_()
self.add_path_to_sys_path()
try:
encoding.writelines(self.path, self.SPYDER_PATH) # Saving path
encoding.writelines(self.not_active_path,
self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError:
pass
self.sig_pythonpath_changed.emit()
def pythonpath_changed(self):
"""Projects PYTHONPATH contribution has changed"""
self.remove_path_from_sys_path()
self.project_path = self.projects.get_pythonpath()
self.add_path_to_sys_path()
self.sig_pythonpath_changed.emit()
@Slot()
def win_env(self):
"""Show Windows current user environment variables"""
self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes Issue 2036
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
else:
style_name = CONF.get('appearance', 'windows_style',
self.default_style)
style = QStyleFactory.create(style_name)
if style is not None:
style.setProperty('name', style_name)
qapp.setStyle(style)
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings"""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features | QDockWidget.DockWidgetVerticalTitleBar
plugin.dockwidget.setFeatures(features)
plugin.update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
return
@Slot()
def edit_preferences(self):
"""Edit Spyder preferences"""
from spyder.preferences.configdialog import ConfigDialog
dlg = ConfigDialog(self)
dlg.size_change.connect(self.set_prefs_size)
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.workingdirectory, self.lspmanager, self.editor,
self.projects, self.ipyconsole,
self.historylog, self.help, self.variableexplorer,
self.onlinehelp, self.explorer, self.findinfiles
]+self.thirdparty_plugins:
if plugin is not None:
try:
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
except Exception:
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
dlg.show()
dlg.check_all_settings()
dlg.pages_widget.currentChanged.connect(self.__preference_page_changed)
dlg.exec_()
def __preference_page_changed(self, index):
"""Preference page index has changed"""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append( (qaction_or_qshortcut, context,
name, add_sc_to_tip) )
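# Illustrative sketch (hypothetical names, not taken from Spyder itself): a
# plugin would typically register one of its QActions so that apply_shortcuts()
# below can bind the key sequence configured for (context, name), e.g.:
#
#     find_action = QAction(_("Find text"), self)
#     main.register_shortcut(find_action, context="find_replace",
#                            name="Find text", add_sc_to_tip=True)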
def apply_shortcuts(self):
"""Apply shortcuts settings to all widgets/plugins"""
toberemoved = []
for index, (qobject, context, name,
add_sc_to_tip) in enumerate(self.shortcut_data):
keyseq = QKeySequence( get_shortcut(context, name) )
try:
if isinstance(qobject, QAction):
if sys.platform == 'darwin' and \
qobject._shown_shortcut == 'missing':
qobject._shown_shortcut = keyseq
else:
qobject.setShortcut(keyseq)
if add_sc_to_tip:
add_shortcut_to_tooltip(qobject, context, name)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self.shortcut_data.pop(index)
@Slot()
def show_shortcuts_dialog(self):
from spyder.widgets.shortcutssummary import ShortcutsSummaryDialog
dlg = ShortcutsSummaryDialog(None)
dlg.exec_()
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1:  # equivalent to "while True"; the speed difference only mattered in Python 2
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See Issue 1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
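# Illustrative sketch (not part of Spyder): a second process can hand a file to
# this server by connecting to the configured port and sending the path; the
# server acknowledges with a single space byte. Names below are hypothetical.
def _example_send_to_open_files_server(fname, port):
    import socket
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('127.0.0.1', port))
    client.sendall(fname.encode('utf-8'))
    client.recv(1)  # wait for the b' ' acknowledgement
    client.close()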
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""
Quit and Restart Spyder application.
If reset is True, Spyder settings are reset to defaults on restart.
"""
# Get start path to use in restart script
spyder_start_directory = get_module_path('spyder')
restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
# Get any initial argument passed when spyder was started
# Note: Variables defined in bootstrap.py and spyder/app/start.py
env = os.environ.copy()
bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
spyder_args = env.pop('SPYDER_ARGS')
# Get current process and python running spyder
pid = os.getpid()
python = sys.executable
# Check if started with bootstrap.py
if bootstrap_args is not None:
spyder_args = bootstrap_args
is_bootstrap = True
else:
is_bootstrap = False
# Pass variables as environment variables (str) to restarter subprocess
env['SPYDER_ARGS'] = spyder_args
env['SPYDER_PID'] = str(pid)
env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
env['SPYDER_RESET'] = str(reset)
if DEV:
if os.name == 'nt':
env['PYTHONPATH'] = ';'.join(sys.path)
else:
env['PYTHONPATH'] = ':'.join(sys.path)
# Build the command and popen arguments depending on the OS
if os.name == 'nt':
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
shell = False
else:
startupinfo = None
shell = True
command = '"{0}" "{1}"'
command = command.format(python, restart_script)
try:
if self.closing(True):
subprocess.Popen(command, shell=shell, env=env,
startupinfo=startupinfo)
self.console.quit()
except Exception as error:
# If there is an error with subprocess, Spyder should not quit and
# the error can be inspected in the internal console
print(error) # spyder: test-skip
print(command) # spyder: test-skip
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global File Switcher
def open_fileswitcher(self, symbol=False):
"""Open file list management dialog box."""
if self.fileswitcher is not None and \
self.fileswitcher.is_visible:
self.fileswitcher.hide()
self.fileswitcher.is_visible = False
return
if symbol:
self.fileswitcher.plugin = self.editor
self.fileswitcher.set_search_text('@')
else:
self.fileswitcher.set_search_text('')
self.fileswitcher.show()
self.fileswitcher.is_visible = True
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_fileswitcher(symbol=True)
def add_to_fileswitcher(self, plugin, tabs, data, icon):
"""Add a plugin to the File Switcher."""
if self.fileswitcher is None:
self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
else:
self.fileswitcher.add_plugin(plugin, tabs, data, icon)
self.fileswitcher.sig_goto_file.connect(
plugin.get_current_tab_manager().set_stack_index)
# ---- Check for Spyder Updates
def _check_updates_ready(self):
"""Called by WorkerUpdates when ready"""
from spyder.widgets.helperwidgets import MessageCheckBox
# `feedback` = False is used on startup, so only positive feedback is
# given. `feedback` = True is used after startup (when using the menu
# action), and gives feedback whether updates are found or not.
feedback = self.give_updates_feedback
# Get results from worker
update_available = self.worker_updates.update_available
latest_release = self.worker_updates.latest_release
error_msg = self.worker_updates.error
url_r = __project_url__ + '/releases'
url_i = 'https://docs.spyder-ide.org/installation.html'
# Define the custom QMessageBox
box = MessageCheckBox(icon=QMessageBox.Information,
parent=self)
box.setWindowTitle(_("Spyder updates"))
box.set_checkbox_text(_("Check for updates on startup"))
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
# Adjust the checkbox depending on the stored configuration
section, option = 'main', 'check_updates_on_startup'
check_updates = CONF.get(section, option)
box.set_checked(check_updates)
if error_msg is not None:
msg = error_msg
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
else:
if update_available:
anaconda_msg = ''
if 'Anaconda' in sys.version or 'conda-forge' in sys.version:
anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems "
"that you are using Spyder with "
"<b>Anaconda/Miniconda</b>. Please "
"<b>don't</b> use <code>pip</code> to "
"update it as that will probably break "
"your installation.<br><br>"
"Instead, please wait until new conda "
"packages are available and use "
"<code>conda</code> to perform the "
"update.<hr>")
msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
"your package manager to update Spyder or go to our "
"<a href=\"%s\">Releases</a> page to download this "
"new version. <br><br>If you are not sure how to "
"proceed to update Spyder please refer to our "
" <a href=\"%s\">Installation</a> instructions."
"") % (latest_release, url_r, url_i)
msg += '<br>' + anaconda_msg
box.setText(msg)
box.set_check_visible(True)
box.exec_()
check_updates = box.is_checked()
elif feedback:
msg = _("Spyder is up to date.")
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
# Update checkbox based on user interaction
CONF.set(section, option, check_updates)
# Enable check_updates_action after the thread has finished
self.check_updates_action.setDisabled(False)
# Provide feedback when clicking the menu if check on startup is on
self.give_updates_feedback = True
@Slot()
def check_updates(self, startup=False):
"""
Check for spyder updates on github releases using a QThread.
"""
from spyder.workers.updates import WorkerUpdates
# Disable check_updates_action while the thread is working
self.check_updates_action.setDisabled(True)
if self.thread_updates is not None:
self.thread_updates.terminate()
self.thread_updates = QThread(self)
self.worker_updates = WorkerUpdates(self, startup=startup)
self.worker_updates.sig_ready.connect(self._check_updates_ready)
self.worker_updates.sig_ready.connect(self.thread_updates.quit)
self.worker_updates.moveToThread(self.thread_updates)
self.thread_updates.started.connect(self.worker_updates.start)
self.thread_updates.start()
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
# This doesn't create our QApplication, just holds a reference to
# MAIN_APP, created above to show our splash screen as early as
# possible
app = qapplication()
# --- Set application icon
app.setWindowIcon(APP_ICON)
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
return app
class Spy(object):
"""
Inspect Spyder internals
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return list(self.__dict__.keys()) +\
[x for x in dir(self.__class__) if x[0] != '_']
def versions(self):
return get_versions()
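# Illustrative use (from Spyder's internal console, where run_spyder() below
# injects an instance of Spy as 'spy'):
#
#     spy.window.editor          # reach any plugin attached to the main window
#     spy.versions()['python']   # inspect the reported component versions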
def run_spyder(app, options, args):
"""
Create and show Spyder's main window
Start QApplication event loop
"""
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.shell.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
main.console.shell.interpreter.namespace['spy'] = \
Spy(app=app, window=main)
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main():
"""Main function"""
# **** For Pytest ****
# We need to create MainWindow **here** to avoid passing pytest
# options to Spyder
if running_under_pytest():
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
options = Mock()
options.working_directory = None
options.profile = False
options.multithreaded = False
options.new_instance = False
options.project = None
options.window_title = None
options.opengl_implementation = None
options.debug_info = None
options.debug_output = None
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = initialize()
window = run_spyder(app, options, None)
return window
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise argparse won't be able to exit if the --help option is passed
options, args = get_options()
# **** Set OpenGL implementation to use ****
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = initialize()
# **** Handle other options ****
if options.reset_config_files:
# <!> Remove all configuration files!
reset_config_files()
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Show crash dialog ****
if CONF.get('main', 'crash', False) and not DEV:
CONF.set('main', 'crash', False)
if SPLASH is not None:
SPLASH.hide()
QMessageBox.information(
None, "Spyder",
"Spyder crashed during last session.<br><br>"
"If Spyder does not start at all and <u>before submitting a "
"bug report</u>, please try to reset settings to defaults by "
"running Spyder with the command line option '--reset':<br>"
"<span style=\'color: #555555\'><b>spyder --reset</b></span>"
"<br><br>"
"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
"this command will remove all your Spyder configuration files "
"located in '%s').<br><br>"
"If Spyder still fails to launch, you should consult our "
"comprehensive <b><a href=\"%s\">Troubleshooting Guide</a></b>, "
"which when followed carefully solves the vast majority of "
"crashes; also, take "
"the time to search for <a href=\"%s\">known bugs</a> or "
"<a href=\"%s\">discussions</a> matching your situation before "
"submitting a report to our <a href=\"%s\">issue tracker</a>. "
"Your feedback will always be greatly appreciated."
"" % (get_conf_path(), __trouble_url__, __project_url__,
__forum_url__, __project_url__))
# **** Create main window ****
mainwindow = None
try:
mainwindow = run_spyder(app, options, args)
except FontError as fontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('spyder_crash.log', 'w'))
if mainwindow is None:
# An exception occurred
if SPLASH is not None:
SPLASH.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
__init__.py
|
from __future__ import print_function, division, absolute_import
import os
import sys
import shutil
import subprocess
import optparse
import math
import signal
import threading
import atexit
import types
import re
import pprint
import time
import traceback
import locale
import inspect
import getpass
import tempfile
import copy
import posixpath
try:
import Queue as queue
except ImportError:
import queue
from . import apxs_config
_py_version = '%s%s' % sys.version_info[:2]
_py_soabi = ''
_py_soext = '.so'
_py_dylib = ''
try:
import sysconfig
import distutils.sysconfig
_py_soabi = sysconfig.get_config_var('SOABI')
_py_soext = sysconfig.get_config_var('EXT_SUFFIX')
if _py_soext is None:
_py_soext = sysconfig.get_config_var('SO')
if (sysconfig.get_config_var('WITH_DYLD') and
sysconfig.get_config_var('LIBDIR') and
sysconfig.get_config_var('LDLIBRARY')):
_py_dylib = posixpath.join(sysconfig.get_config_var('LIBDIR'),
sysconfig.get_config_var('LDLIBRARY'))
if not os.path.exists(_py_dylib):
_py_dylib = ''
except ImportError:
pass
MOD_WSGI_SO = 'mod_wsgi-py%s%s' % (_py_version, _py_soext)
MOD_WSGI_SO = posixpath.join(posixpath.dirname(__file__), MOD_WSGI_SO)
if not os.path.exists(MOD_WSGI_SO) and _py_soabi:
MOD_WSGI_SO = 'mod_wsgi-py%s.%s%s' % (_py_version, _py_soabi, _py_soext)
MOD_WSGI_SO = posixpath.join(posixpath.dirname(__file__), MOD_WSGI_SO)
if not os.path.exists(MOD_WSGI_SO) and os.name == 'nt':
MOD_WSGI_SO = 'mod_wsgi%s' % distutils.sysconfig.get_config_var('EXT_SUFFIX')
MOD_WSGI_SO = os.path.join(os.path.dirname(__file__), MOD_WSGI_SO)
MOD_WSGI_SO = MOD_WSGI_SO.replace('\\', '/')
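# Illustrative only: the search above first looks for a sibling file named
# 'mod_wsgi-py<major><minor><EXT_SUFFIX>', then retries with the SOABI tag
# spliced in ('mod_wsgi-py<major><minor>.<SOABI><EXT_SUFFIX>'), and on
# Windows finally falls back to 'mod_wsgi<EXT_SUFFIX>' next to this file.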
def where():
return MOD_WSGI_SO
def default_run_user():
if os.name == 'nt':
return '#0'
try:
import pwd
uid = os.getuid()
return pwd.getpwuid(uid).pw_name
except KeyError:
return '#%d' % uid
def default_run_group():
if os.name == 'nt':
return '#0'
try:
import pwd
uid = os.getuid()
entry = pwd.getpwuid(uid)
except KeyError:
return '#%d' % uid
try:
import grp
gid = entry.pw_gid
return grp.getgrgid(gid).gr_name
except KeyError:
return '#%d' % gid
def find_program(names, default=None, paths=[]):
for name in names:
for path in os.environ['PATH'].split(':') + paths:
program = posixpath.join(path, name)
if os.path.exists(program):
return program
return default
def find_mimetypes():
if os.name == 'nt':
return posixpath.join(posixpath.dirname(posixpath.dirname(
apxs_config.HTTPD)), 'conf', 'mime.types')
else:
import mimetypes
for name in mimetypes.knownfiles:
if os.path.exists(name):
return name
else:
return '/dev/null'
SHELL = find_program(['bash', 'sh'], paths=['/usr/local/bin'])
APACHE_GENERAL_CONFIG = """
<IfModule !version_module>
LoadModule version_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_version.so'
</IfModule>
ServerName %(host)s
ServerRoot '%(server_root)s'
PidFile '%(pid_file)s'
<IfVersion >= 2.4>
DefaultRuntimeDir '%(server_root)s'
</IfVersion>
ServerTokens ProductOnly
ServerSignature Off
<IfDefine !MOD_WSGI_MPM_ENABLE_WINNT_MODULE>
User ${MOD_WSGI_USER}
Group ${MOD_WSGI_GROUP}
</IfDefine>
<IfDefine MOD_WSGI_WITH_LISTENER_HOST>
Listen %(host)s:%(port)s
</IfDefine>
<IfDefine !MOD_WSGI_WITH_LISTENER_HOST>
Listen %(port)s
</IfDefine>
<IfVersion < 2.4>
LockFile '%(server_root)s/accept.lock'
</IfVersion>
<IfVersion >= 2.4>
<IfDefine MOD_WSGI_WITH_PHP5>
<IfModule !mpm_event_module>
<IfModule !mpm_worker_module>
<IfModule !mpm_prefork_module>
<IfDefine MOD_WSGI_MPM_EXISTS_PREFORK_MODULE>
LoadModule mpm_prefork_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_prefork.so'
</IfDefine>
</IfModule>
</IfModule>
</IfModule>
</IfDefine>
</IfVersion>
<IfVersion >= 2.4>
<IfModule !mpm_event_module>
<IfModule !mpm_worker_module>
<IfModule !mpm_prefork_module>
<IfDefine MOD_WSGI_MPM_ENABLE_EVENT_MODULE>
LoadModule mpm_event_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_event.so'
</IfDefine>
<IfDefine MOD_WSGI_MPM_ENABLE_WORKER_MODULE>
LoadModule mpm_worker_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_worker.so'
</IfDefine>
<IfDefine MOD_WSGI_MPM_ENABLE_PREFORK_MODULE>
LoadModule mpm_prefork_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_prefork.so'
</IfDefine>
</IfModule>
</IfModule>
</IfModule>
</IfVersion>
<IfDefine MOD_WSGI_WITH_HTTP2>
LoadModule http2_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_http2.so'
</IfDefine>
<IfVersion >= 2.4>
<IfModule !access_compat_module>
LoadModule access_compat_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_access_compat.so'
</IfModule>
<IfDefine !MOD_WSGI_MPM_ENABLE_WINNT_MODULE>
<IfModule !unixd_module>
LoadModule unixd_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_unixd.so'
</IfModule>
</IfDefine>
<IfModule !authn_core_module>
LoadModule authn_core_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authn_core.so'
</IfModule>
<IfModule !authz_core_module>
LoadModule authz_core_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authz_core.so'
</IfModule>
</IfVersion>
<IfModule !authz_host_module>
LoadModule authz_host_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authz_host.so'
</IfModule>
<IfModule !mime_module>
LoadModule mime_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mime.so'
</IfModule>
<IfModule !rewrite_module>
LoadModule rewrite_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_rewrite.so'
</IfModule>
<IfModule !alias_module>
LoadModule alias_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_alias.so'
</IfModule>
<IfModule !dir_module>
LoadModule dir_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_dir.so'
</IfModule>
<IfModule !env_module>
LoadModule env_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_env.so'
</IfModule>
<IfModule !headers_module>
LoadModule headers_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_headers.so'
</IfModule>
<IfModule !filter_module>
LoadModule filter_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_filter.so'
</IfModule>
<IfDefine MOD_WSGI_DIRECTORY_LISTING>
<IfModule !autoindex_module>
LoadModule autoindex_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_autoindex.so'
</IfModule>
</IfDefine>
<IfVersion >= 2.2.15>
<IfModule !reqtimeout_module>
LoadModule reqtimeout_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_reqtimeout.so'
</IfModule>
</IfVersion>
<IfDefine MOD_WSGI_COMPRESS_RESPONSES>
<IfModule !deflate_module>
LoadModule deflate_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_deflate.so'
</IfModule>
</IfDefine>
<IfDefine MOD_WSGI_AUTH_USER>
<IfModule !auth_basic_module>
LoadModule auth_basic_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_auth_basic.so'
</IfModule>
<IfModule !auth_digest_module>
LoadModule auth_digest_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_auth_digest.so'
</IfModule>
<IfModule !authz_user_module>
LoadModule authz_user_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authz_user.so'
</IfModule>
</IfDefine>
<IfDefine MOD_WSGI_WITH_PROXY>
<IfModule !proxy_module>
LoadModule proxy_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_proxy.so
</IfModule>
<IfModule !proxy_http_module>
LoadModule proxy_http_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_proxy_http.so
</IfModule>
</IfDefine>
<IfModule mpm_prefork_module>
<IfDefine MOD_WSGI_WITH_PHP5>
<IfModule !php5_module>
Loadmodule php5_module '${MOD_WSGI_MODULES_DIRECTORY}/libphp5.so'
</IfModule>
AddHandler application/x-httpd-php .php
</IfDefine>
</IfModule>
<IfDefine MOD_WSGI_LOAD_PYTHON_DYLIB>
LoadFile '%(python_dylib)s'
</IfDefine>
LoadModule wsgi_module '%(mod_wsgi_so)s'
<IfDefine MOD_WSGI_SERVER_METRICS>
<IfModule !status_module>
LoadModule status_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_status.so'
</IfModule>
</IfDefine>
<IfDefine MOD_WSGI_CGID_SCRIPT>
<IfModule !cgid_module>
LoadModule cgid_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_cgid.so'
</IfModule>
</IfDefine>
<IfDefine MOD_WSGI_CGI_SCRIPT>
<IfModule !cgi_module>
LoadModule cgi_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_cgi.so'
</IfModule>
</IfDefine>
<IfVersion < 2.4>
DefaultType text/plain
</IfVersion>
TypesConfig '%(mime_types)s'
HostnameLookups Off
MaxMemFree 64
Timeout %(socket_timeout)s
ListenBacklog %(server_backlog)s
<IfDefine MOD_WSGI_WITH_HTTP2>
Protocols h2 h2c http/1.1
</IfDefine>
<IfVersion >= 2.2.15>
RequestReadTimeout %(request_read_timeout)s
</IfVersion>
LimitRequestBody %(limit_request_body)s
<Directory />
AllowOverride None
<IfVersion < 2.4>
Order deny,allow
Deny from all
</IfVersion>
<IfVersion >= 2.4>
Require all denied
</IfVersion>
</Directory>
WSGIPythonHome '%(python_home)s'
WSGIVerboseDebugging '%(verbose_debugging_flag)s'
<IfDefine !MOD_WSGI_MPM_ENABLE_WINNT_MODULE>
<IfDefine MOD_WSGI_WITH_SOCKET_PREFIX>
WSGISocketPrefix %(socket_prefix)s/wsgi
</IfDefine>
<IfDefine !MOD_WSGI_WITH_SOCKET_PREFIX>
WSGISocketPrefix %(server_root)s/wsgi
</IfDefine>
WSGISocketRotation Off
</IfDefine>
<IfDefine EMBEDDED_MODE>
MaxConnectionsPerChild %(maximum_requests)s
</IfDefine>
<IfDefine !ONE_PROCESS>
<IfDefine !EMBEDDED_MODE>
WSGIRestrictEmbedded On
<IfDefine MOD_WSGI_MULTIPROCESS>
WSGIDaemonProcess %(host)s:%(port)s \\
display-name='%(daemon_name)s' \\
home='%(working_directory)s' \\
processes=%(processes)s \\
threads=%(threads)s \\
maximum-requests=%(maximum_requests)s \\
python-path='%(python_path)s' \\
python-eggs='%(python_eggs)s' \\
lang='%(lang)s' \\
locale='%(locale)s' \\
listen-backlog=%(daemon_backlog)s \\
queue-timeout=%(queue_timeout)s \\
socket-timeout=%(socket_timeout)s \\
connect-timeout=%(connect_timeout)s \\
request-timeout=%(request_timeout)s \\
inactivity-timeout=%(inactivity_timeout)s \\
startup-timeout=%(startup_timeout)s \\
deadlock-timeout=%(deadlock_timeout)s \\
graceful-timeout=%(graceful_timeout)s \\
eviction-timeout=%(eviction_timeout)s \\
restart-interval=%(restart_interval)s \\
cpu-time-limit=%(cpu_time_limit)s \\
shutdown-timeout=%(shutdown_timeout)s \\
send-buffer-size=%(send_buffer_size)s \\
receive-buffer-size=%(receive_buffer_size)s \\
header-buffer-size=%(header_buffer_size)s \\
response-buffer-size=%(response_buffer_size)s \\
response-socket-timeout=%(response_socket_timeout)s \\
server-metrics=%(server_metrics_flag)s
</IfDefine>
<IfDefine !MOD_WSGI_MULTIPROCESS>
WSGIDaemonProcess %(host)s:%(port)s \\
display-name='%(daemon_name)s' \\
home='%(working_directory)s' \\
threads=%(threads)s \\
maximum-requests=%(maximum_requests)s \\
python-path='%(python_path)s' \\
python-eggs='%(python_eggs)s' \\
lang='%(lang)s' \\
locale='%(locale)s' \\
listen-backlog=%(daemon_backlog)s \\
queue-timeout=%(queue_timeout)s \\
socket-timeout=%(socket_timeout)s \\
connect-timeout=%(connect_timeout)s \\
request-timeout=%(request_timeout)s \\
inactivity-timeout=%(inactivity_timeout)s \\
startup-timeout=%(startup_timeout)s \\
deadlock-timeout=%(deadlock_timeout)s \\
graceful-timeout=%(graceful_timeout)s \\
eviction-timeout=%(eviction_timeout)s \\
restart-interval=%(restart_interval)s \\
cpu-time-limit=%(cpu_time_limit)s \\
shutdown-timeout=%(shutdown_timeout)s \\
send-buffer-size=%(send_buffer_size)s \\
receive-buffer-size=%(receive_buffer_size)s \\
response-buffer-size=%(response_buffer_size)s \\
response-socket-timeout=%(response_socket_timeout)s \\
server-metrics=%(server_metrics_flag)s
</IfDefine>
</IfDefine>
</IfDefine>
WSGICallableObject '%(callable_object)s'
WSGIPassAuthorization On
WSGIMapHEADToGET %(map_head_to_get)s
<IfDefine MOD_WSGI_DISABLE_RELOADING>
WSGIScriptReloading Off
</IfDefine>
<IfDefine EMBEDDED_MODE>
<IfDefine MOD_WSGI_WITH_PYTHON_PATH>
WSGIPythonPath '%(python_path)s'
</IfDefine>
</IfDefine>
<IfDefine ONE_PROCESS>
WSGIRestrictStdin Off
<IfDefine MOD_WSGI_WITH_PYTHON_PATH>
WSGIPythonPath '%(python_path)s'
</IfDefine>
</IfDefine>
<IfDefine MOD_WSGI_SERVER_METRICS>
ExtendedStatus On
</IfDefine>
WSGIServerMetrics %(server_metrics_flag)s
<IfDefine MOD_WSGI_SERVER_STATUS>
<Location /server-status>
SetHandler server-status
<IfVersion < 2.4>
Order deny,allow
Deny from all
Allow from localhost
</IfVersion>
<IfVersion >= 2.4>
Require all denied
Require host localhost
</IfVersion>
</Location>
</IfDefine>
<IfDefine MOD_WSGI_KEEP_ALIVE>
KeepAlive On
KeepAliveTimeout %(keep_alive_timeout)s
</IfDefine>
<IfDefine !MOD_WSGI_KEEP_ALIVE>
KeepAlive Off
</IfDefine>
<IfDefine MOD_WSGI_ENABLE_SENDFILE>
EnableSendfile On
WSGIEnableSendfile On
</IfDefine>
<IfDefine MOD_WSGI_COMPRESS_RESPONSES>
AddOutputFilterByType DEFLATE text/plain
AddOutputFilterByType DEFLATE text/html
AddOutputFilterByType DEFLATE text/xml
AddOutputFilterByType DEFLATE text/css
AddOutputFilterByType DEFLATE text/javascript
AddOutputFilterByType DEFLATE application/xhtml+xml
AddOutputFilterByType DEFLATE application/javascript
AddOutputFilterByType DEFLATE application/json
</IfDefine>
<IfDefine MOD_WSGI_ROTATE_LOGS>
ErrorLog "|%(rotatelogs_executable)s \\
%(error_log_file)s.%%Y-%%m-%%d-%%H_%%M_%%S %(max_log_size)sM"
</IfDefine>
<IfDefine !MOD_WSGI_ROTATE_LOGS>
ErrorLog "%(error_log_file)s"
</IfDefine>
LogLevel %(log_level)s
<IfDefine MOD_WSGI_ERROR_LOG_FORMAT>
ErrorLogFormat "%(error_log_format)s"
</IfDefine>
<IfDefine MOD_WSGI_ACCESS_LOG>
<IfModule !log_config_module>
LoadModule log_config_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_log_config.so
</IfModule>
LogFormat "%%h %%l %%u %%t \\"%%r\\" %%>s %%b" common
LogFormat "%%h %%l %%u %%t \\"%%r\\" %%>s %%b \\"%%{Referer}i\\" \\"%%{User-agent}i\\"" combined
LogFormat "%(access_log_format)s" custom
<IfDefine MOD_WSGI_ROTATE_LOGS>
CustomLog "|%(rotatelogs_executable)s \\
%(access_log_file)s.%%Y-%%m-%%d-%%H_%%M_%%S %(max_log_size)sM" %(log_format_nickname)s
</IfDefine>
<IfDefine !MOD_WSGI_ROTATE_LOGS>
CustomLog "%(access_log_file)s" %(log_format_nickname)s
</IfDefine>
</IfDefine>
<IfDefine MOD_WSGI_CHUNKED_REQUEST>
WSGIChunkedRequest On
</IfDefine>
<IfDefine MOD_WSGI_WITH_PROXY_HEADERS>
WSGITrustedProxyHeaders %(trusted_proxy_headers)s
</IfDefine>
<IfDefine MOD_WSGI_WITH_TRUSTED_PROXIES>
WSGITrustedProxies %(trusted_proxies)s
</IfDefine>
<IfDefine MOD_WSGI_WITH_HTTPS>
<IfModule !ssl_module>
LoadModule ssl_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_ssl.so
</IfModule>
</IfDefine>
<IfModule mpm_prefork_module>
<IfDefine !ONE_PROCESS>
ServerLimit %(prefork_server_limit)s
StartServers %(prefork_start_servers)s
MaxClients %(prefork_max_clients)s
MinSpareServers %(prefork_min_spare_servers)s
MaxSpareServers %(prefork_max_spare_servers)s
</IfDefine>
<IfDefine ONE_PROCESS>
ServerLimit 1
StartServers 1
MaxClients 1
MinSpareServers 1
MaxSpareServers 1
</IfDefine>
MaxRequestsPerChild 0
</IfModule>
<IfModule mpm_worker_module>
<IfDefine !ONE_PROCESS>
ServerLimit %(worker_server_limit)s
ThreadLimit %(worker_thread_limit)s
StartServers %(worker_start_servers)s
MaxClients %(worker_max_clients)s
MinSpareThreads %(worker_min_spare_threads)s
MaxSpareThreads %(worker_max_spare_threads)s
ThreadsPerChild %(worker_threads_per_child)s
</IfDefine>
<IfDefine ONE_PROCESS>
ServerLimit 1
ThreadLimit 1
StartServers 1
MaxClients 1
MinSpareThreads 1
MaxSpareThreads 1
ThreadsPerChild 1
</IfDefine>
MaxRequestsPerChild 0
ThreadStackSize 262144
</IfModule>
<IfModule mpm_event_module>
<IfDefine !ONE_PROCESS>
ServerLimit %(worker_server_limit)s
ThreadLimit %(worker_thread_limit)s
StartServers %(worker_start_servers)s
MaxClients %(worker_max_clients)s
MinSpareThreads %(worker_min_spare_threads)s
MaxSpareThreads %(worker_max_spare_threads)s
ThreadsPerChild %(worker_threads_per_child)s
</IfDefine>
<IfDefine ONE_PROCESS>
ServerLimit 1
ThreadLimit 1
StartServers 1
MaxClients 1
MinSpareThreads 1
MaxSpareThreads 1
ThreadsPerChild 1
</IfDefine>
MaxRequestsPerChild 0
ThreadStackSize 262144
</IfModule>
<IfDefine !MOD_WSGI_VIRTUAL_HOST>
<IfVersion < 2.4>
NameVirtualHost *:%(port)s
</IfVersion>
<VirtualHost _default_:%(port)s>
</VirtualHost>
</IfDefine>
<IfDefine MOD_WSGI_VIRTUAL_HOST>
<IfVersion < 2.4>
NameVirtualHost *:%(port)s
</IfVersion>
<VirtualHost _default_:%(port)s>
<Location />
<IfVersion < 2.4>
Order deny,allow
Deny from all
</IfVersion>
<IfVersion >= 2.4>
Require all denied
</IfVersion>
<IfDefine MOD_WSGI_ALLOW_LOCALHOST>
Allow from localhost
</IfDefine>
</Location>
</VirtualHost>
<IfDefine !MOD_WSGI_HTTPS_ONLY>
<VirtualHost *:%(port)s>
ServerName %(server_name)s
<IfDefine MOD_WSGI_SERVER_ALIAS>
ServerAlias %(server_aliases)s
</IfDefine>
</VirtualHost>
<IfDefine MOD_WSGI_REDIRECT_WWW>
<VirtualHost *:%(port)s>
ServerName %(parent_domain)s
Redirect permanent / http://%(server_name)s:%(port)s/
</VirtualHost>
</IfDefine>
</IfDefine>
<IfDefine MOD_WSGI_HTTPS_ONLY>
<VirtualHost *:%(port)s>
ServerName %(server_name)s
<IfDefine MOD_WSGI_SERVER_ALIAS>
ServerAlias %(server_aliases)s
</IfDefine>
RewriteEngine On
RewriteCond %%{HTTPS} off
RewriteRule (.*) https://%(server_name)s:%(https_port)s%%{REQUEST_URI}
</VirtualHost>
<IfDefine MOD_WSGI_REDIRECT_WWW>
<VirtualHost *:%(port)s>
ServerName %(parent_domain)s
RewriteEngine On
RewriteCond %%{HTTPS} off
RewriteRule (.*) https://%(server_name)s:%(https_port)s%%{REQUEST_URI}
</VirtualHost>
</IfDefine>
</IfDefine>
</IfDefine>
<IfDefine MOD_WSGI_VIRTUAL_HOST>
<IfDefine MOD_WSGI_WITH_HTTPS>
<IfDefine MOD_WSGI_WITH_LISTENER_HOST>
Listen %(host)s:%(https_port)s
</IfDefine>
<IfDefine !MOD_WSGI_WITH_LISTENER_HOST>
Listen %(https_port)s
</IfDefine>
<IfVersion < 2.4>
NameVirtualHost *:%(https_port)s
</IfVersion>
<VirtualHost _default_:%(https_port)s>
<Location />
<IfVersion < 2.4>
Order deny,allow
Deny from all
</IfVersion>
<IfVersion >= 2.4>
Require all denied
</IfVersion>
<IfDefine MOD_WSGI_ALLOW_LOCALHOST>
Allow from localhost
</IfDefine>
</Location>
SSLEngine On
SSLCertificateFile %(ssl_certificate_file)s
SSLCertificateKeyFile %(ssl_certificate_key_file)s
<IfDefine MOD_WSGI_VERIFY_CLIENT>
SSLCACertificateFile %(ssl_ca_certificate_file)s
SSLVerifyClient none
</IfDefine>
<IfDefine MOD_WSGI_CERTIFICATE_CHAIN>
SSLCertificateChainFile %(ssl_certificate_chain_file)s
</IfDefine>
</VirtualHost>
<VirtualHost *:%(https_port)s>
ServerName %(server_name)s
<IfDefine MOD_WSGI_SERVER_ALIAS>
ServerAlias %(server_aliases)s
</IfDefine>
SSLEngine On
SSLCertificateFile %(ssl_certificate_file)s
SSLCertificateKeyFile %(ssl_certificate_key_file)s
<IfDefine MOD_WSGI_VERIFY_CLIENT>
SSLCACertificateFile %(ssl_ca_certificate_file)s
SSLVerifyClient none
</IfDefine>
<IfDefine MOD_WSGI_CERTIFICATE_CHAIN>
SSLCertificateChainFile %(ssl_certificate_chain_file)s
</IfDefine>
<IfDefine MOD_WSGI_HTTPS_ONLY>
<IfDefine MOD_WSGI_HSTS_POLICY>
Header set Strict-Transport-Security %(hsts_policy)s
</IfDefine>
</IfDefine>
<IfDefine MOD_WSGI_SSL_ENVIRONMENT>
SSLOptions +StdEnvVars
</IfDefine>
</VirtualHost>
<IfDefine MOD_WSGI_REDIRECT_WWW>
<VirtualHost *:%(https_port)s>
ServerName %(parent_domain)s
Redirect permanent / https://%(server_name)s:%(https_port)s/
SSLEngine On
SSLCertificateFile %(ssl_certificate_file)s
SSLCertificateKeyFile %(ssl_certificate_key_file)s
<IfDefine MOD_WSGI_VERIFY_CLIENT>
SSLCACertificateFile %(ssl_ca_certificate_file)s
SSLVerifyClient none
</IfDefine>
<IfDefine MOD_WSGI_CERTIFICATE_CHAIN>
SSLCertificateChainFile %(ssl_certificate_chain_file)s
</IfDefine>
</VirtualHost>
</IfDefine>
</IfDefine>
</IfDefine>
DocumentRoot '%(document_root)s'
AccessFileName .htaccess
<Directory '%(server_root)s'>
AllowOverride %(allow_override)s
<Files handler.wsgi>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
</Files>
</Directory>
<Directory '%(document_root)s'>
AllowOverride %(allow_override)s
<IfDefine MOD_WSGI_DIRECTORY_INDEX>
DirectoryIndex %(directory_index)s
</IfDefine>
<IfDefine MOD_WSGI_DIRECTORY_LISTING>
Options +Indexes
</IfDefine>
<IfDefine MOD_WSGI_CGI_SCRIPT>
Options +ExecCGI
</IfDefine>
<IfDefine MOD_WSGI_CGID_SCRIPT>
Options +ExecCGI
</IfDefine>
RewriteEngine On
Include %(rewrite_rules)s
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
</Directory>
<Directory '%(document_root)s%(mount_point)s'>
<IfDefine !MOD_WSGI_STATIC_ONLY>
RewriteCond %%{REQUEST_FILENAME} !-f
<IfDefine MOD_WSGI_DIRECTORY_INDEX>
RewriteCond %%{REQUEST_FILENAME} !-d
</IfDefine>
<IfDefine MOD_WSGI_SERVER_STATUS>
RewriteCond %%{REQUEST_URI} !/server-status
</IfDefine>
RewriteRule .* - [H=wsgi-handler]
</IfDefine>
</Directory>
<IfDefine MOD_WSGI_ERROR_OVERRIDE>
WSGIErrorOverride On
</IfDefine>
<IfDefine MOD_WSGI_HOST_ACCESS>
<Location />
WSGIAccessScript '%(host_access_script)s'
</Location>
</IfDefine>
<IfDefine MOD_WSGI_AUTH_USER>
<Location />
AuthType %(auth_type)s
AuthName '%(host)s:%(port)s'
Auth%(auth_type)sProvider wsgi
WSGIAuthUserScript '%(auth_user_script)s'
<IfDefine MOD_WSGI_AUTH_GROUP>
WSGIAuthGroupScript '%(auth_group_script)s'
</IfDefine>
<IfVersion < 2.4>
Require valid-user
<IfDefine MOD_WSGI_AUTH_GROUP>
Require wsgi-group '%(auth_group)s'
</IfDefine>
</IfVersion>
<IfVersion >= 2.4>
<RequireAll>
Require valid-user
<IfDefine MOD_WSGI_AUTH_GROUP>
Require wsgi-group '%(auth_group)s'
</IfDefine>
</RequireAll>
</IfVersion>
</Location>
</IfDefine>
<IfDefine !ONE_PROCESS>
<IfDefine !EMBEDDED_MODE>
WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\
process-group='%(host)s:%(port)s' application-group=%%{GLOBAL}
WSGIImportScript '%(server_root)s/handler.wsgi' \\
process-group='%(host)s:%(port)s' application-group=%%{GLOBAL}
</IfDefine>
</IfDefine>
<IfDefine EMBEDDED_MODE>
WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\
process-group='%%{GLOBAL}' application-group=%%{GLOBAL}
WSGIImportScript '%(server_root)s/handler.wsgi' \\
process-group='%%{GLOBAL}' application-group=%%{GLOBAL}
</IfDefine>
<IfDefine ONE_PROCESS>
<IfDefine !MOD_WSGI_MPM_ENABLE_WINNT_MODULE>
WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\
process-group='%%{GLOBAL}' application-group=%%{GLOBAL}
WSGIImportScript '%(server_root)s/handler.wsgi' \\
process-group='%%{GLOBAL}' application-group=%%{GLOBAL}
</IfDefine>
<IfDefine MOD_WSGI_MPM_ENABLE_WINNT_MODULE>
WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\
application-group=%%{GLOBAL}
WSGIImportScript '%(server_root)s/handler.wsgi' \\
application-group=%%{GLOBAL}
</IfDefine>
</IfDefine>
"""
APACHE_IGNORE_ACTIVITY_CONFIG = """
<Location '%(url)s'>
WSGIIgnoreActivity On
</Location>
"""
APACHE_PROXY_PASS_MOUNT_POINT_CONFIG = """
ProxyPass '%(mount_point)s' '%(url)s'
ProxyPassReverse '%(mount_point)s' '%(url)s'
<Location '%(mount_point)s'>
RewriteEngine On
RewriteRule .* - [E=SERVER_PORT:%%{SERVER_PORT},NE]
RequestHeader set X-Forwarded-Port %%{SERVER_PORT}e
RewriteCond %%{HTTPS} on
RewriteRule .* - [E=URL_SCHEME:https,NE]
RequestHeader set X-Forwarded-Scheme %%{URL_SCHEME}e env=URL_SCHEME
</Location>
"""
APACHE_PROXY_PASS_MOUNT_POINT_SLASH_CONFIG = """
ProxyPass '%(mount_point)s/' '%(url)s/'
ProxyPassReverse '%(mount_point)s/' '%(url)s/'
<Location '%(mount_point)s/'>
RewriteEngine On
RewriteRule .* - [E=SERVER_PORT:%%{SERVER_PORT},NE]
RequestHeader set X-Forwarded-Port %%{SERVER_PORT}e
RewriteCond %%{HTTPS} on
RewriteRule .* - [E=URL_SCHEME:https,NE]
RequestHeader set X-Forwarded-Scheme %%{URL_SCHEME}e env=URL_SCHEME
</Location>
<LocationMatch '^%(mount_point)s$'>
RewriteEngine On
RewriteRule - http://%%{HTTP_HOST}%%{REQUEST_URI}/ [R=302,L]
</LocationMatch>
"""
APACHE_PROXY_PASS_HOST_CONFIG = """
<VirtualHost *:%(port)s>
ServerName %(host)s
ProxyPass / '%(url)s'
ProxyPassReverse / '%(url)s'
RequestHeader set X-Forwarded-Port %(port)s
RewriteEngine On
RewriteCond %%{HTTPS} on
RewriteRule .* - [E=URL_SCHEME:https,NE]
RequestHeader set X-Forwarded-Scheme %%{URL_SCHEME}e env=URL_SCHEME
</VirtualHost>
"""
APACHE_ALIAS_DIRECTORY_CONFIG = """
Alias '%(mount_point)s' '%(directory)s'
<Directory '%(directory)s'>
AllowOverride %(allow_override)s
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
</Directory>
"""
APACHE_ALIAS_FILENAME_CONFIG = """
Alias '%(mount_point)s' '%(directory)s/%(filename)s'
<Directory '%(directory)s'>
<Files '%(filename)s'>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
</Files>
</Directory>
"""
APACHE_ALIAS_DOCUMENTATION = """
Alias /__wsgi__/docs '%(documentation_directory)s'
Alias /__wsgi__/images '%(images_directory)s'
<Directory '%(documentation_directory)s'>
DirectoryIndex index.html
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
</Directory>
<Directory '%(images_directory)s'>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
</Directory>
"""
APACHE_VERIFY_CLIENT_CONFIG = """
<IfDefine MOD_WSGI_VERIFY_CLIENT>
<Location '%(path)s'>
SSLVerifyClient require
SSLVerifyDepth 1
</Location>
</IfDefine>
"""
APACHE_ERROR_DOCUMENT_CONFIG = """
ErrorDocument '%(status)s' '%(document)s'
"""
APACHE_SETENV_CONFIG = """
SetEnv '%(name)s' '%(value)s'
"""
APACHE_PASSENV_CONFIG = """
PassEnv '%(name)s'
"""
APACHE_HANDLER_SCRIPT_CONFIG = """
WSGIHandlerScript wsgi-resource '%(server_root)s/resource.wsgi' \\
process-group='%(host)s:%(port)s' application-group=%%{GLOBAL}
"""
APACHE_HANDLER_CONFIG = """
AddHandler %(handler)s %(extension)s
"""
APACHE_INCLUDE_CONFIG = """
Include '%(filename)s'
"""
APACHE_TOOLS_CONFIG = """
WSGIDaemonProcess express display-name=%%{GROUP} threads=1 server-metrics=On
"""
APACHE_METRICS_CONFIG = """
WSGIImportScript '%(server_root)s/server-metrics.py' \\
process-group=express application-group=server-metrics
"""
APACHE_SERVICE_CONFIG = """
WSGIDaemonProcess 'service:%(name)s' \\
display-name=%%{GROUP} \\
user='%(user)s' \\
group='%(group)s' \\
home='%(working_directory)s' \\
threads=0 \\
python-path='%(python_path)s' \\
python-eggs='%(python_eggs)s' \\
lang='%(lang)s' \\
locale='%(locale)s' \\
server-metrics=%(server_metrics_flag)s
WSGIImportScript '%(script)s' \\
process-group='service:%(name)s' \\
application-group=%%{GLOBAL}
"""
APACHE_SERVICE_WITH_LOG_CONFIG = """
<VirtualHost *:%(port)s>
<IfDefine MOD_WSGI_ROTATE_LOGS>
ErrorLog "|%(rotatelogs_executable)s \\
%(log_directory)s/%(log_file)s.%%Y-%%m-%%d-%%H_%%M_%%S %(max_log_size)sM"
</IfDefine>
<IfDefine !MOD_WSGI_ROTATE_LOGS>
ErrorLog "%(log_directory)s/%(log_file)s"
</IfDefine>
WSGIDaemonProcess 'service:%(name)s' \\
display-name=%%{GROUP} \\
user='%(user)s' \\
group='%(group)s' \\
home='%(working_directory)s' \\
threads=0 \\
python-path='%(python_path)s' \\
python-eggs='%(python_eggs)s' \\
lang='%(lang)s' \\
locale='%(locale)s' \\
server-metrics=%(server_metrics_flag)s
WSGIImportScript '%(script)s' \\
process-group='service:%(name)s' \\
application-group=%%{GLOBAL}
</VirtualHost>
"""
def generate_apache_config(options):
with open(options['httpd_conf'], 'w') as fp:
print(APACHE_GENERAL_CONFIG % options, file=fp)
if options['ignore_activity']:
for url in options['ignore_activity']:
print(APACHE_IGNORE_ACTIVITY_CONFIG % dict(url=url), file=fp)
if options['proxy_mount_points']:
for mount_point, url in options['proxy_mount_points']:
if mount_point.endswith('/'):
print(APACHE_PROXY_PASS_MOUNT_POINT_CONFIG % dict(
mount_point=mount_point, url=url), file=fp)
else:
print(APACHE_PROXY_PASS_MOUNT_POINT_SLASH_CONFIG % dict(
mount_point=mount_point, url=url), file=fp)
if options['proxy_virtual_hosts']:
for host, url in options['proxy_virtual_hosts']:
print(APACHE_PROXY_PASS_HOST_CONFIG % dict(
host=host, port=options['port'], url=url),
file=fp)
if options['url_aliases']:
for mount_point, target in sorted(options['url_aliases'],
reverse=True):
path = posixpath.abspath(target)
if os.path.isdir(path) or not os.path.exists(path):
if target.endswith('/') and path != '/':
directory = path + '/'
else:
directory = path
print(APACHE_ALIAS_DIRECTORY_CONFIG % dict(
mount_point=mount_point, directory=directory,
allow_override=options['allow_override']),
file=fp)
else:
directory = posixpath.dirname(path)
filename = posixpath.basename(path)
print(APACHE_ALIAS_FILENAME_CONFIG % dict(
mount_point=mount_point, directory=directory,
filename=filename), file=fp)
if options['enable_docs']:
print(APACHE_ALIAS_DOCUMENTATION % options, file=fp)
if options['error_documents']:
for status, document in options['error_documents']:
print(APACHE_ERROR_DOCUMENT_CONFIG % dict(status=status,
document=document.replace("'", "\\'")), file=fp)
if options['ssl_verify_client_urls']:
paths = sorted(options['ssl_verify_client_urls'], reverse=True)
for path in paths:
print(APACHE_VERIFY_CLIENT_CONFIG % dict(path=path), file=fp)
else:
print(APACHE_VERIFY_CLIENT_CONFIG % dict(path='/'), file=fp)
if options['setenv_variables']:
for name, value in options['setenv_variables']:
print(APACHE_SETENV_CONFIG % dict(name=name, value=value),
file=fp)
if options['passenv_variables']:
for name in options['passenv_variables']:
print(APACHE_PASSENV_CONFIG % dict(name=name), file=fp)
if options['handler_scripts']:
print(APACHE_HANDLER_SCRIPT_CONFIG % options, file=fp)
for extension, script in options['handler_scripts']:
print(APACHE_HANDLER_CONFIG % dict(handler='wsgi-resource',
extension=extension), file=fp)
if options['with_cgi']:
print(APACHE_HANDLER_CONFIG % dict(handler='cgi-script',
extension='.cgi'), file=fp)
if options['service_scripts']:
service_log_files = {}
if options['service_log_files']:
service_log_files.update(options['service_log_files'])
users = dict(options['service_users'] or [])
groups = dict(options['service_groups'] or [])
for name, script in options['service_scripts']:
user = users.get(name, '${MOD_WSGI_USER}')
group = groups.get(name, '${MOD_WSGI_GROUP}')
if name in service_log_files:
print(APACHE_SERVICE_WITH_LOG_CONFIG % dict(name=name,
user=user, group=group, script=script,
port=options['port'],
log_directory=options['log_directory'],
log_file=service_log_files[name],
rotatelogs_executable=options['rotatelogs_executable'],
max_log_size=options['max_log_size'],
python_path=options['python_path'],
working_directory=options['working_directory'],
python_eggs=options['python_eggs'],
lang=options['lang'], locale=options['locale'],
server_metrics_flag=options['server_metrics_flag']),
file=fp)
else:
print(APACHE_SERVICE_CONFIG % dict(name=name, user=user,
group=group, script=script,
python_path=options['python_path'],
working_directory=options['working_directory'],
python_eggs=options['python_eggs'],
lang=options['lang'], locale=options['locale'],
server_metrics_flag=options['server_metrics_flag']),
file=fp)
if options['include_files']:
for filename in options['include_files']:
filename = posixpath.abspath(filename)
print(APACHE_INCLUDE_CONFIG % dict(filename=filename),
file=fp)
if options['with_newrelic_platform']:
print(APACHE_TOOLS_CONFIG % options, file=fp)
if options['with_newrelic_platform']:
print(APACHE_METRICS_CONFIG % options, file=fp)
_interval = 1.0
_times = {}
_files = []
_running = False
_queue = queue.Queue()
_lock = threading.Lock()
def _restart(path):
_queue.put(True)
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Change detected to "%s".' % (prefix, path), file=sys.stderr)
print('%s Triggering process restart.' % prefix, file=sys.stderr)
os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
try:
# If the path doesn't denote a file and we were previously
# tracking it, then it has been removed or the file type has
# changed, so force a restart. If we weren't previously
# tracking the file then we can ignore it, as it is probably a
# pseudo reference such as a file extracted from a collection
# of modules contained in a zip file.
if not os.path.isfile(path):
return path in _times
# Check for when file last modified.
mtime = os.stat(path).st_mtime
if path not in _times:
_times[path] = mtime
# Force restart when modification time has changed, even
# if time now older, as that could indicate older file
# has been restored.
if mtime != _times[path]:
return True
except Exception:
# If any exception occurred, it is likely that the file has
# been removed just before the stat(), so force a restart.
return True
return False
def _monitor():
global _files
while True:
# Check modification times on all files in sys.modules.
for module in list(sys.modules.values()):
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
if _modified(path):
return _restart(path)
# Check modification times on files which have
# specifically been registered for monitoring.
for path in _files:
if _modified(path):
return _restart(path)
# Go to sleep for specified interval.
try:
return _queue.get(timeout=_interval)
except queue.Empty:
pass
_thread = threading.Thread(target=_monitor)
_thread.daemon = True
def _exiting():
try:
_queue.put(True)
except Exception:
pass
_thread.join()
def track_changes(path):
if path not in _files:
_files.append(path)
def start_reloader(interval=1.0):
global _interval
if interval < _interval:
_interval = interval
global _running
_lock.acquire()
if not _running:
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Starting change monitor.' % prefix, file=sys.stderr)
_running = True
_thread.start()
atexit.register(_exiting)
_lock.release()
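# Illustrative sketch (not part of the original module): how a WSGI script
# might register an extra, non-module file with the change monitor and then
# start it. The settings file path below is hypothetical.
def _example_start_reloader():
    track_changes('/srv/app/settings.yml')   # hypothetical extra file to watch
    start_reloader(interval=0.5)             # check for changes at least twice a second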
class PostMortemDebugger(object):
def __init__(self, application, startup):
self.application = application
self.generator = None
import pdb
self.debugger = pdb.Pdb()
if startup:
self.activate_console()
def activate_console(self):
self.debugger.set_trace(sys._getframe().f_back)
def run_post_mortem(self):
self.debugger.reset()
self.debugger.interaction(None, sys.exc_info()[2])
def __call__(self, environ, start_response):
try:
self.generator = self.application(environ, start_response)
return self
except Exception:
self.run_post_mortem()
raise
def __iter__(self):
try:
for item in self.generator:
yield item
except Exception:
self.run_post_mortem()
raise
def close(self):
try:
if hasattr(self.generator, 'close'):
return self.generator.close()
except Exception:
self.run_post_mortem()
raise
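# Illustrative sketch (not part of the original module): wrapping a trivial
# WSGI application with PostMortemDebugger so that any unhandled exception
# raised while producing the response drops into pdb. Passing startup=False
# means the interactive console is only entered post mortem, not at wrap time.
def _example_post_mortem_wrapper():
    def broken_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        raise RuntimeError('boom')           # triggers run_post_mortem()
    return PostMortemDebugger(broken_app, startup=False)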
class RequestRecorder(object):
def __init__(self, application, savedir):
self.application = application
self.savedir = savedir
self.lock = threading.Lock()
self.pid = os.getpid()
self.count = 0
def __call__(self, environ, start_response):
with self.lock:
self.count += 1
count = self.count
key = "%s-%s-%s" % (int(time.time()*1000000), self.pid, count)
iheaders = os.path.join(self.savedir, key + ".iheaders")
iheaders_fp = open(iheaders, 'w')
icontent = os.path.join(self.savedir, key + ".icontent")
icontent_fp = open(icontent, 'w+b')
oheaders = os.path.join(self.savedir, key + ".oheaders")
oheaders_fp = open(oheaders, 'w')
ocontent = os.path.join(self.savedir, key + ".ocontent")
ocontent_fp = open(ocontent, 'w+b')
oaexcept = os.path.join(self.savedir, key + ".oaexcept")
oaexcept_fp = open(oaexcept, 'w')
orexcept = os.path.join(self.savedir, key + ".orexcept")
orexcept_fp = open(orexcept, 'w')
ofexcept = os.path.join(self.savedir, key + ".ofexcept")
ofexcept_fp = open(ofexcept, 'w')
errors = environ['wsgi.errors']
pprint.pprint(environ, stream=iheaders_fp)
iheaders_fp.close()
input = environ['wsgi.input']
data = input.read(8192)
while data:
icontent_fp.write(data)
data = input.read(8192)
icontent_fp.flush()
icontent_fp.seek(0, os.SEEK_SET)
environ['wsgi.input'] = icontent_fp
def _start_response(status, response_headers, *args):
pprint.pprint(((status, response_headers)+args),
stream=oheaders_fp)
_write = start_response(status, response_headers, *args)
def write(data):
ocontent_fp.write(data)
ocontent_fp.flush()
return _write(data)
return write
try:
try:
result = self.application(environ, _start_response)
except:
traceback.print_exception(*sys.exc_info(), file=oaexcept_fp)
raise
try:
for data in result:
ocontent_fp.write(data)
ocontent_fp.flush()
yield data
except:
traceback.print_exception(*sys.exc_info(), file=orexcept_fp)
raise
finally:
try:
if hasattr(result, 'close'):
result.close()
except:
traceback.print_exception(*sys.exc_info(),
file=ofexcept_fp)
raise
finally:
oheaders_fp.close()
ocontent_fp.close()
oaexcept_fp.close()
orexcept_fp.close()
ofexcept_fp.close()
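# Illustrative sketch (not part of the original module): recording a single
# request made against a trivial WSGI application. Each request produces a
# '<timestamp>-<pid>-<count>.*' set of files in the save directory. The
# temporary directory and dummy start_response below are purely illustrative.
def _example_request_recorder():
    import io
    import tempfile
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello']
    savedir = tempfile.mkdtemp()
    recorder = RequestRecorder(hello_app, savedir)
    environ = {'wsgi.input': io.BytesIO(b''), 'wsgi.errors': sys.stderr}
    def dummy_start_response(status, headers, *args):
        return lambda data: None             # stand-in for the server's write()
    body = b''.join(recorder(environ, dummy_start_response))
    return savedir, body                     # recorded files live in savedir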
class ApplicationHandler(object):
def __init__(self, entry_point, application_type='script',
callable_object='application', mount_point='/',
with_newrelic_agent=False, debug_mode=False,
enable_debugger=False, debugger_startup=False,
enable_recorder=False, recorder_directory=None):
self.entry_point = entry_point
self.application_type = application_type
self.callable_object = callable_object
self.mount_point = mount_point
if application_type == 'module':
__import__(entry_point)
self.module = sys.modules[entry_point]
self.application = getattr(self.module, callable_object)
self.target = self.module.__file__
parts = os.path.splitext(self.target)
if parts[-1].lower() in ('.pyc', '.pyo', '.pyd'):
self.target = parts[0] + '.py'
elif application_type == 'paste':
from paste.deploy import loadapp
self.application = loadapp('config:%s' % entry_point)
self.target = entry_point
elif application_type != 'static':
self.module = types.ModuleType('__wsgi__')
self.module.__file__ = entry_point
with open(entry_point, 'r') as fp:
code = compile(fp.read(), entry_point, 'exec',
dont_inherit=True)
exec(code, self.module.__dict__)
sys.modules['__wsgi__'] = self.module
self.application = getattr(self.module, callable_object)
self.target = entry_point
try:
self.mtime = os.path.getmtime(self.target)
except Exception:
self.mtime = None
if with_newrelic_agent:
self.setup_newrelic_agent()
self.debug_mode = debug_mode
self.enable_debugger = enable_debugger
if enable_debugger:
self.setup_debugger(debugger_startup)
if enable_recorder:
self.setup_recorder(recorder_directory)
def setup_newrelic_agent(self):
import newrelic.agent
config_file = os.environ.get('NEW_RELIC_CONFIG_FILE')
environment = os.environ.get('NEW_RELIC_ENVIRONMENT')
global_settings = newrelic.agent.global_settings()
if global_settings.log_file is None:
global_settings.log_file = 'stderr'
newrelic.agent.initialize(config_file, environment)
newrelic.agent.register_application()
self.application = newrelic.agent.WSGIApplicationWrapper(
self.application)
def setup_debugger(self, startup):
self.application = PostMortemDebugger(self.application, startup)
def setup_recorder(self, savedir):
self.application = RequestRecorder(self.application, savedir)
def reload_required(self, environ):
if self.debug_mode:
return False
try:
mtime = os.path.getmtime(self.target)
except Exception:
mtime = None
return mtime != self.mtime
def handle_request(self, environ, start_response):
# Strip out the leading component due to internal redirect in
# Apache when using web application as fallback resource.
mount_point = environ.get('mod_wsgi.mount_point')
script_name = environ.get('SCRIPT_NAME')
path_info = environ.get('PATH_INFO')
if mount_point is not None:
# If this is set then it means that SCRIPT_NAME was
# overridden by a trusted proxy header. In this case
# we want to ignore any local mount point, simply
# stripping it from the path.
script_name = environ['mod_wsgi.script_name']
environ['PATH_INFO'] = script_name + path_info
if self.mount_point != '/':
if environ['PATH_INFO'].startswith(self.mount_point):
environ['PATH_INFO'] = environ['PATH_INFO'][len(
self.mount_point):]
else:
environ['SCRIPT_NAME'] = ''
environ['PATH_INFO'] = script_name + path_info
if self.mount_point != '/':
if environ['PATH_INFO'].startswith(self.mount_point):
environ['SCRIPT_NAME'] = self.mount_point
environ['PATH_INFO'] = environ['PATH_INFO'][len(
self.mount_point):]
return self.application(environ, start_response)
def __call__(self, environ, start_response):
return self.handle_request(environ, start_response)
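# Illustrative sketch (not part of the original module): loading a WSGI script
# file through ApplicationHandler and invoking it with a minimal environ. The
# temporary script written below is a stand-in for a real application entry
# point, and a dummy start_response is used in place of the server's own.
def _example_application_handler():
    import tempfile
    script = tempfile.NamedTemporaryFile(mode='w', suffix='.wsgi', delete=False)
    script.write(
            "def application(environ, start_response):\n"
            "    start_response('200 OK', [('Content-Type', 'text/plain')])\n"
            "    return [b'ok']\n")
    script.close()
    handler = ApplicationHandler(script.name, application_type='script')
    environ = {'SCRIPT_NAME': '', 'PATH_INFO': '/'}
    return handler(environ, lambda status, headers, *args: None)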
class ResourceHandler(object):
def __init__(self, resources):
self.resources = {}
for extension, script in resources:
extension_name = re.sub(r'[^\w]{1}', '_', extension)
module_name = '__wsgi_resource%s__' % extension_name
module = types.ModuleType(module_name)
module.__file__ = script
with open(script, 'r') as fp:
code = compile(fp.read(), script, 'exec',
dont_inherit=True)
exec(code, module.__dict__)
sys.modules[module_name] = module
self.resources[extension] = module
def resource_extension(self, resource):
return os.path.splitext(resource)[-1]
def reload_required(self, environ):
extension = self.resource_extension(environ['SCRIPT_NAME'])
function = getattr(self.resources[extension], 'reload_required', None)
if function is not None:
return function(environ)
return False
def handle_request(self, environ, start_response):
resource = environ['SCRIPT_NAME']
extension = self.resource_extension(resource)
module = self.resources[extension]
function = getattr(module, 'handle_request', None)
if function is not None:
return function(environ, start_response)
function = getattr(module, 'application')
return function(environ, start_response)
def __call__(self, environ, start_response):
return self.handle_request(environ, start_response)
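# Illustrative sketch (not part of the original module): a ResourceHandler
# dispatching requests for a made-up '.report' extension to a generated
# handler script. The extension, script contents and environ are hypothetical.
def _example_resource_handler():
    import tempfile
    script = tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False)
    script.write(
            "def application(environ, start_response):\n"
            "    start_response('200 OK', [('Content-Type', 'text/plain')])\n"
            "    return [b'report']\n")
    script.close()
    handler = ResourceHandler([('.report', script.name)])
    environ = {'SCRIPT_NAME': '/data/latest.report', 'PATH_INFO': ''}
    return handler(environ, lambda status, headers, *args: None)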
WSGI_HANDLER_SCRIPT = """
import os
import sys
import atexit
import time
import mod_wsgi.server
working_directory = r'%(working_directory)s'
entry_point = r'%(entry_point)s'
application_type = '%(application_type)s'
callable_object = '%(callable_object)s'
mount_point = '%(mount_point)s'
with_newrelic_agent = %(with_newrelic_agent)s
newrelic_config_file = '%(newrelic_config_file)s'
newrelic_environment = '%(newrelic_environment)s'
disable_reloading = %(disable_reloading)s
reload_on_changes = %(reload_on_changes)s
debug_mode = %(debug_mode)s
enable_debugger = %(enable_debugger)s
debugger_startup = %(debugger_startup)s
enable_coverage = %(enable_coverage)s
coverage_directory = '%(coverage_directory)s'
enable_profiler = %(enable_profiler)s
profiler_directory = '%(profiler_directory)s'
enable_recorder = %(enable_recorder)s
recorder_directory = '%(recorder_directory)s'
enable_gdb = %(enable_gdb)s
os.environ['MOD_WSGI_EXPRESS'] = 'true'
os.environ['MOD_WSGI_SERVER_NAME'] = '%(server_host)s'
os.environ['MOD_WSGI_SERVER_ALIASES'] = %(server_aliases)r or ''
if reload_on_changes:
os.environ['MOD_WSGI_RELOADER_ENABLED'] = 'true'
if debug_mode:
os.environ['MOD_WSGI_DEBUG_MODE'] = 'true'
# We need to fiddle sys.path as we are not using daemon mode and so
# the working directory will not be added to sys.path by virtue of
# 'home' option to WSGIDaemonProcess directive. We could use the
# WSGIPythonPath directive, but that will cause .pth files to also
# be evaluated.
sys.path.insert(0, working_directory)
if enable_debugger:
os.environ['MOD_WSGI_DEBUGGER_ENABLED'] = 'true'
def output_coverage_report():
coverage_info.stop()
coverage_info.html_report(directory=coverage_directory)
if enable_coverage:
os.environ['MOD_WSGI_COVERAGE_ENABLED'] = 'true'
from coverage import coverage
coverage_info = coverage()
coverage_info.start()
atexit.register(output_coverage_report)
def output_profiler_data():
profiler_info.disable()
output_file = '%%s-%%d.pstats' %% (int(time.time()*1000000), os.getpid())
output_file = os.path.join(profiler_directory, output_file)
profiler_info.dump_stats(output_file)
if enable_profiler:
os.environ['MOD_WSGI_PROFILER_ENABLED'] = 'true'
from cProfile import Profile
profiler_info = Profile()
profiler_info.enable()
atexit.register(output_profiler_data)
if enable_recorder:
os.environ['MOD_WSGI_RECORDER_ENABLED'] = 'true'
if enable_gdb:
os.environ['MOD_WSGI_GDB_ENABLED'] = 'true'
if with_newrelic_agent:
if newrelic_config_file:
os.environ['NEW_RELIC_CONFIG_FILE'] = newrelic_config_file
if newrelic_environment:
os.environ['NEW_RELIC_ENVIRONMENT'] = newrelic_environment
handler = mod_wsgi.server.ApplicationHandler(entry_point,
application_type=application_type, callable_object=callable_object,
mount_point=mount_point, with_newrelic_agent=with_newrelic_agent,
debug_mode=debug_mode, enable_debugger=enable_debugger,
debugger_startup=debugger_startup, enable_recorder=enable_recorder,
recorder_directory=recorder_directory)
if not disable_reloading:
reload_required = handler.reload_required
handle_request = handler.handle_request
if not disable_reloading and reload_on_changes and not debug_mode:
mod_wsgi.server.start_reloader()
"""
WSGI_RESOURCE_SCRIPT = """
import mod_wsgi.server
resources = %(resources)s
handler = mod_wsgi.server.ResourceHandler(resources)
reload_required = handler.reload_required
handle_request = handler.handle_request
"""
WSGI_DEFAULT_SCRIPT = """
CONTENT = b'''
<html>
<head>
<title>My web site runs on Malt Whiskey</title>
</head>
<body style="margin-top: 100px;">
<table align="center" style="width: 850px;" border="0" cellpadding="30">
<tbody>
<tr>
<td>
<img style="width: 275px; height: 445px;"
src="/__wsgi__/images/snake-whiskey.jpg">
</td>
<td style="text-align: center;">
<span style="font-family: Arial,Helvetica,sans-serif;
font-weight: bold; font-size: 70px;">
My web site<br>runs on<br>Malt Whiskey<br>
<br>
</span>
<span style="font-family: Arial,Helvetica,sans-serif;
font-weight: bold;">
For further information on configuring mod_wsgi,<br>
see the <a href="%(documentation_url)s">documentation</a>.
</span>
</td>
</tr>
</tbody>
</table>
</body>
</html>
'''
def application(environ, start_response):
status = '200 OK'
output = CONTENT
response_headers = [('Content-type', 'text/html'),
('Content-Length', str(len(output)))]
start_response(status, response_headers)
return [output]
"""
def generate_wsgi_handler_script(options):
path = os.path.join(options['server_root'], 'handler.wsgi')
with open(path, 'w') as fp:
print(WSGI_HANDLER_SCRIPT % options, file=fp)
path = os.path.join(options['server_root'], 'resource.wsgi')
with open(path, 'w') as fp:
print(WSGI_RESOURCE_SCRIPT % dict(resources=repr(
options['handler_scripts'])), file=fp)
path = os.path.join(options['server_root'], 'default.wsgi')
with open(path, 'w') as fp:
print(WSGI_DEFAULT_SCRIPT % options, file=fp)
SERVER_METRICS_SCRIPT = """
import os
import logging
newrelic_config_file = '%(newrelic_config_file)s'
newrelic_environment = '%(newrelic_environment)s'
with_newrelic_platform = %(with_newrelic_platform)s
if with_newrelic_platform:
if newrelic_config_file:
os.environ['NEW_RELIC_CONFIG_FILE'] = newrelic_config_file
if newrelic_environment:
os.environ['NEW_RELIC_ENVIRONMENT'] = newrelic_environment
logging.basicConfig(level=logging.INFO,
format='%%(name)s (pid=%%(process)d, level=%%(levelname)s): %%(message)s')
_logger = logging.getLogger(__name__)
try:
from mod_wsgi.metrics.newrelic import Agent
agent = Agent()
agent.start()
except ImportError:
_logger.fatal('The module mod_wsgi.metrics.newrelic is not available. '
'The New Relic platform plugin has been disabled. Install the '
'"mod_wsgi-metrics" package.')
"""
def generate_server_metrics_script(options):
path = os.path.join(options['server_root'], 'server-metrics.py')
with open(path, 'w') as fp:
print(SERVER_METRICS_SCRIPT % options, file=fp)
WSGI_CONTROL_SCRIPT = """
#!%(shell_executable)s
# %(sys_argv)s
HTTPD="%(httpd_executable)s"
HTTPD_ARGS="%(httpd_arguments)s"
HTTPD_COMMAND="$HTTPD $HTTPD_ARGS"
MOD_WSGI_MODULES_DIRECTORY="%(modules_directory)s"
export MOD_WSGI_MODULES_DIRECTORY
SHLIBPATH="%(shlibpath)s"
if [ "x$SHLIBPATH" != "x" ]; then
%(shlibpath_var)s="$SHLIBPATH:$%(shlibpath_var)s"
export %(shlibpath_var)s
fi
MOD_WSGI_SERVER_ROOT="%(server_root)s"
export MOD_WSGI_SERVER_ROOT
MOD_WSGI_LISTENER_HOST="%(host)s"
export MOD_WSGI_LISTENER_HOST
MOD_WSGI_HTTP_PORT="%(port)s"
MOD_WSGI_HTTPS_PORT="%(https_port)s"
export MOD_WSGI_HTTP_PORT
export MOD_WSGI_HTTPS_PORT
WSGI_RUN_USER="${WSGI_RUN_USER:-%(user)s}"
WSGI_RUN_GROUP="${WSGI_RUN_GROUP:-%(group)s}"
MOD_WSGI_USER="${MOD_WSGI_USER:-${WSGI_RUN_USER}}"
MOD_WSGI_GROUP="${MOD_WSGI_GROUP:-${WSGI_RUN_GROUP}}"
export MOD_WSGI_USER
export MOD_WSGI_GROUP
if [ `id -u` = "0" -a ${MOD_WSGI_USER} = "root" ]; then
cat << EOF
WARNING: When running as the 'root' user, it is required that the options
'--user' and '--group' be specified to mod_wsgi-express. These should
define a non 'root' user and group under which the Apache child worker
processes and mod_wsgi daemon processes should be run. Failure to specify
these options will result in Apache and/or the mod_wsgi daemon processes
failing to start. See the mod_wsgi-express documentation for further
information on this restriction.
EOF
fi
MOD_WSGI_WORKING_DIRECTORY="%(working_directory)s"
export MOD_WSGI_WORKING_DIRECTORY
LANG='%(lang)s'
LC_ALL='%(locale)s'
export LANG
export LC_ALL
ACMD="$1"
ARGV="$@"
if test -f %(server_root)s/envvars; then
. %(server_root)s/envvars
fi
STATUSURL="http://%(host)s:%(port)s/server-status"
if [ "x$ARGV" = "x" ]; then
ARGV="-h"
fi
GDB="%(gdb_executable)s"
ENABLE_GDB="%(enable_gdb)s"
PROCESS_NAME="%(process_name)s"
cd $MOD_WSGI_WORKING_DIRECTORY
case $ACMD in
start|stop|restart|graceful|graceful-stop)
if [ "x$ENABLE_GDB" != "xTrue" ]; then
exec -a "$PROCESS_NAME" $HTTPD_COMMAND -k $ARGV
else
echo "run $HTTPD_ARGS -k $ARGV" > %(server_root)s/gdb.cmds
gdb -x %(server_root)s/gdb.cmds $HTTPD
fi
;;
configtest)
exec $HTTPD_COMMAND -t
;;
status)
exec %(python_executable)s -m webbrowser -t $STATUSURL
;;
*)
exec $HTTPD_COMMAND $ARGV
esac
"""
APACHE_ENVVARS_FILE = """
. %(envvars_script)s
"""
def generate_control_scripts(options):
path = os.path.join(options['server_root'], 'apachectl')
with open(path, 'w') as fp:
print(WSGI_CONTROL_SCRIPT.lstrip() % options, file=fp)
os.chmod(path, 0o755)
path = os.path.join(options['server_root'], 'envvars')
if options['envvars_script']:
with open(path, 'w') as fp:
print(APACHE_ENVVARS_FILE.lstrip() % options, file=fp)
elif not os.path.isfile(path):
with open(path, 'w') as fp:
pass
def check_percentage(option, opt_str, value, parser):
if value is not None and (value < 0 or value > 1):
raise optparse.OptionValueError('%s option value needs to be within '
'the range 0 to 1.' % opt_str)
setattr(parser.values, option.dest, value)
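# Illustrative sketch (not part of the original module): how optparse invokes
# the check_percentage() callback. A value outside the range 0 to 1 raises
# OptionValueError, while a valid value is stored on the parser's values
# object. The option name matches one of the real callers, but the parser is
# a throwaway created just for the demonstration.
def _example_check_percentage():
    parser = optparse.OptionParser()
    parser.add_option('--initial-workers', type='float',
            dest='initial_workers', action='callback',
            callback=check_percentage)
    (values, _) = parser.parse_args(['--initial-workers', '0.75'])
    return values.initial_workers            # 0.75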
option_list = []
def add_option(platforms, *args, **kwargs):
targets = platforms.split('|')
suppress = False
if os.name == 'nt':
if 'all' not in targets and 'windows' not in targets:
suppress = True
else:
if 'all' not in targets and 'unix' not in targets:
suppress = True
if suppress:
kwargs['help'] = optparse.SUPPRESS_HELP
if 'hidden' in targets:
kwargs['help'] = optparse.SUPPRESS_HELP
option_list.append(optparse.make_option(*args, **kwargs))
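# Illustrative sketch (not part of the original module): add_option() never
# removes platform specific options, it only suppresses their help text when
# they do not apply, so the parser accepts the same option names everywhere.
# The flag name below is hypothetical and exists only for this demonstration;
# calling the function appends a throwaway option to option_list.
def _example_platform_specific_option():
    add_option('unix', '--example-unix-only-flag', action='store_true',
            default=False, help='Hypothetical unix-only flag.')
    return option_list[-1]                   # appended on every platform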
add_option('all', '--application-type', default='script',
metavar='TYPE', help='The type of WSGI application entry point '
'that was provided. Defaults to \'script\', indicating the '
'traditional mod_wsgi style WSGI script file specified by a '
'filesystem path. Alternatively one can supply \'module\', '
'indicating that the provided entry point is a Python module '
'which should be imported using the standard Python import '
'mechanism, or \'paste\' indicating that the provided entry '
'point is a Paste deployment configuration file. If you want '
'to just use the server to host static files only, then you '
'can also instead supply \'static\' with the target being '
'the directory containing the files to serve or the current '
'directory if none is supplied.')
add_option('all', '--entry-point', default=None,
metavar='FILE-PATH|MODULE', help='The file system path or '
'module name identifying the file which contains the WSGI '
'application entry point. How the value given is interpreted '
'depends on the corresponding type identified using the '
'\'--application-type\' option. Use of this option is the '
'same as if the value had been given as argument but without '
'any option specifier. A named option is also provided so '
'as to make it clearer in a long option list what the entry '
'point actually is. If both methods are used, that specified '
'by this option will take precedence.')
add_option('all', '--host', default=None, metavar='IP-ADDRESS',
help='The specific host (IP address) interface on which '
'requests are to be accepted. Defaults to listening on '
'all host interfaces.')
add_option('all', '--port', default=8000, type='int',
metavar='NUMBER', help='The specific port to bind to and '
'on which requests are to be accepted. Defaults to port 8000.')
add_option('all', '--http2', action='store_true', default=False,
help='Flag indicating whether HTTP/2 should be enabled. '
'Requires the mod_http2 module to be available.')
add_option('all', '--https-port', type='int', metavar='NUMBER',
help='The specific port to bind to and on which secure '
'requests are to be accepted.')
add_option('all', '--ssl-port', type='int', metavar='NUMBER',
dest='https_port', help=optparse.SUPPRESS_HELP)
add_option('all', '--ssl-certificate-file', default=None,
metavar='FILE-PATH', help='Specify the path to the SSL '
'certificate file.')
add_option('all', '--ssl-certificate-key-file', default=None,
metavar='FILE-PATH', help='Specify the path to the private '
'key file corresponding to the SSL certificate file.')
add_option('all', '--ssl-certificate', default=None,
metavar='FILE-PATH', help='Specify the common path to the SSL '
'certificate files. This is a convenience function so that '
'only one option is required to specify the location of the '
'certificate file and the private key file. It is expected that '
'the files have \'.crt\' and \'.key\' extensions. This option '
'should refer to the common part of the names for both files '
'which appears before the extension.')
add_option('all', '--ssl-ca-certificate-file', default=None,
metavar='FILE-PATH', help='Specify the path to the file with '
'the CA certificates to be used for client authentication. When '
'specified, access to the whole site will by default require '
'client authentication. To require client authentication for '
'only parts of the site, use the --ssl-verify-client option.')
add_option('all', '--ssl-verify-client', action='append',
metavar='URL-PATH', dest='ssl_verify_client_urls',
help='Specify a sub URL of the site for which client '
'authentication is required. When this option is specified, '
'the default of client authentication being required for the '
'whole site will be disabled and verification will only be '
'required for the specified sub URL.')
add_option('all', '--ssl-certificate-chain-file', default=None,
metavar='FILE-PATH', help='Specify the path to a file '
'containing the certificates of Certification Authorities (CA) '
'which form the certificate chain of the server certificate.')
add_option('all', '--ssl-environment', action='store_true',
default=False, help='Flag indicating whether the standard set '
'of SSL related variables are passed in the per request '
'environment passed to a handler.')
add_option('all', '--https-only', action='store_true',
default=False, help='Flag indicating whether any requests '
'made using a HTTP request over the non secure connection '
'should be redirected automatically to use a HTTPS request '
'over the secure connection.')
add_option('all', '--hsts-policy', default=None, metavar='PARAMS',
help='Specify the HSTS policy that should be applied when '
'HTTPS only connections are being enforced.')
add_option('all', '--server-name', default=None, metavar='HOSTNAME',
help='The primary host name of the web server. If this name '
'starts with \'www.\' then an automatic redirection from the '
'parent domain name to the \'www.\' server name will be created.')
add_option('all', '--server-alias', action='append',
dest='server_aliases', metavar='HOSTNAME', help='A secondary '
'host name for the web server. May include wildcard patterns.')
add_option('all', '--allow-localhost', action='store_true',
default=False, help='Flag indicating whether access via '
'localhost should still be allowed when a server name has been '
'specified and a name based virtual host has been configured.')
add_option('unix', '--processes', type='int', metavar='NUMBER',
help='The number of worker processes (instances of the WSGI '
'application) to be started up and which will handle requests '
'concurrently. Defaults to a single process.')
add_option('all', '--threads', type='int', default=5, metavar='NUMBER',
help='The number of threads in the request thread pool of '
'each process for handling requests. Defaults to 5 in each '
'process. Note that if embedded mode is used and only the prefork '
'MPM is available, then processes will be used instead of threads.')
add_option('unix', '--max-clients', type='int', default=None,
metavar='NUMBER', help='The maximum number of simultaneous '
'client connections that will be accepted. This will default '
'to being 1.5 times the total number of threads in the '
'request thread pools across all process handling requests. '
'Note that if embedded mode is used this will be ignored.')
add_option('unix', '--initial-workers', type='float', default=None,
metavar='NUMBER', action='callback', callback=check_percentage,
help='The initial number of workers to create on startup '
'expressed as a percentage of the maximum number of clients. '
'The value provided should be between 0 and 1. The default is '
'dependent on the type of MPM being used. Note that if '
'embedded mode is used, this will be ignored.'),
add_option('unix', '--minimum-spare-workers', type='float',
default=None, metavar='NUMBER', action='callback',
callback=check_percentage, help='The minimum number of spare '
'workers to maintain expressed as a percentage of the maximum '
'number of clients. The value provided should be between 0 and '
'1. The default is dependent on the type of MPM being used. '
'Note that if embedded mode is used, this will be ignored.')
add_option('unix', '--maximum-spare-workers', type='float',
default=None, metavar='NUMBER', action='callback',
callback=check_percentage, help='The maximum number of spare '
'workers to maintain expressed as a percentage of the maximum '
'number of clients. The value provided should be between 0 and '
'1. The default is dependent on the type of MPM being used. '
'Note that if embedded mode is used, this will be ignored.')
add_option('all', '--limit-request-body', type='int', default=10485760,
metavar='NUMBER', help='The maximum number of bytes which are '
'allowed in a request body. Defaults to 10485760 (10MB).')
add_option('all', '--maximum-requests', type='int', default=0,
metavar='NUMBER', help='The number of requests after which '
'any one worker process will be restarted and the WSGI '
'application reloaded. Defaults to 0, indicating that the '
'worker process should never be restarted based on the number '
'of requests received.')
add_option('unix', '--startup-timeout', type='int', default=15,
metavar='SECONDS', help='Maximum number of seconds allowed '
'to pass waiting for the application to be successfully '
'loaded and started by a worker process. When this timeout '
'has been reached without the application having been '
'successfully loaded and started, the worker process will '
'be forced to restart. Defaults to 15 seconds.')
add_option('unix', '--shutdown-timeout', type='int', default=5,
metavar='SECONDS', help='Maximum number of seconds allowed '
'to pass when waiting for a worker process to shutdown as a '
'result of the maximum number of requests or inactivity timeout '
'being reached, or when a user initiated SIGINT signal is sent '
'to a worker process. When this timeout has been reached the '
'worker process will be forced to exit even if there are '
'still active requests or it is still running Python exit '
'functions. Defaults to 5 seconds.')
add_option('unix', '--restart-interval', type='int', default=0,
metavar='SECONDS', help='Number of seconds between worker '
'process restarts. If graceful timeout is also specified, '
'active requests will be given a chance to complete before '
'the process is forced to exit and restart. Not enabled by '
'default.')
add_option('unix', '--cpu-time-limit', type='int', default=0,
metavar='SECONDS', help='Number of seconds of CPU time the '
'process can use before it will be restarted. If graceful '
'timeout is also specified, active requests will be given '
'a chance to complete before the process is forced to exit '
'and restart. Not enabled by default.')
add_option('unix', '--graceful-timeout', type='int', default=15,
metavar='SECONDS', help='Grace period for requests to complete '
'normally, while still accepting new requests, when worker '
'processes are being shutdown and restarted due to maximum '
'requests being reached or restart interval having expired. '
'Defaults to 15 seconds.')
add_option('unix', '--eviction-timeout', type='int', default=0,
metavar='SECONDS', help='Grace period for requests to complete '
'normally, while still accepting new requests, when the WSGI '
'application is being evicted from the worker processes, and '
'the process restarted, due to forced graceful restart signal. '
'Defaults to timeout specified by \'--graceful-timeout\' '
'option.')
add_option('unix', '--deadlock-timeout', type='int', default=60,
metavar='SECONDS', help='Maximum number of seconds allowed '
'to pass before the worker process is forcibly shutdown and '
'restarted after a potential deadlock on the Python GIL has '
'been detected. Defaults to 60 seconds.')
add_option('unix', '--inactivity-timeout', type='int', default=0,
metavar='SECONDS', help='Maximum number of seconds allowed '
'to pass before the worker process is shutdown and restarted '
'when the worker process has entered an idle state and is no '
'longer receiving new requests. Not enabled by default.')
add_option('unix', '--ignore-activity', action='append',
dest='ignore_activity', metavar='URL-PATH', help='Specify '
'the URL path for any location where activity should be '
'ignored when the \'--activity-timeout\' option is used. '
'This would be used on health check URLs so that health '
'checks do not prevent process restarts due to inactivity.')
add_option('unix', '--request-timeout', type='int', default=60,
metavar='SECONDS', help='Maximum number of seconds allowed '
'to pass before the worker process is forcibly shutdown and '
'restarted when a request does not complete in the expected '
'time. In a multi threaded worker, the request time is '
'calculated as an average across all request threads. Defaults '
'to 60 seconds.')
add_option('unix', '--connect-timeout', type='int', default=15,
metavar='SECONDS', help='Maximum number of seconds allowed '
'to pass before giving up on attempting to get a connection '
'to the worker process from the Apache child process which '
'accepted the request. This comes into play when the worker '
'listener backlog limit is exceeded. Defaults to 15 seconds.')
add_option('all', '--socket-timeout', type='int', default=60,
metavar='SECONDS', help='Maximum number of seconds allowed '
'to pass before timing out on a read or write operation on '
'a socket and aborting the request. Defaults to 60 seconds.')
add_option('all', '--queue-timeout', type='int', default=45,
metavar='SECONDS', help='Maximum number of seconds allowed '
'for a request to be accepted for handling by a worker process, '
'measured from the time when the Apache child process '
'originally accepted the request. Defaults to 45 seconds.')
add_option('all', '--header-timeout', type='int', default=15,
metavar='SECONDS', help='The number of seconds allowed for '
'receiving the request including the headers. This may be '
'dynamically increased if a minimum rate for reading the '
'request and headers is also specified, up to any limit '
'imposed by a maximum header timeout. Defaults to 15 seconds.')
add_option('all', '--header-max-timeout', type='int', default=30,
metavar='SECONDS', help='Maximum number of seconds allowed for '
'receiving the request including the headers. This is the hard '
'limit after taking into consideration any increases to the '
'basic timeout due to the minimum rate for reading the request and '
'headers which may be specified. Defaults to 30 seconds.')
add_option('all', '--header-min-rate', type='int', default=500,
metavar='BYTES', help='The number of bytes required to be sent '
'as part of the request and headers to trigger a dynamic '
'increase in the timeout on receiving the request including '
'headers. Each time this number of bytes is received the timeout '
'will be increased by 1 second up to any maximum specified by '
'the maximum header timeout. Defaults to 500 bytes.')
add_option('all', '--body-timeout', type='int', default=15,
metavar='SECONDS', help='The number of seconds allowed for '
'receiving the request body. This may be dynamically increased '
'if a minimum rate for reading the request body is also '
'specified, up to any limit imposed by a maximum body timeout. '
'Defaults to 15 seconds.')
add_option('all', '--body-max-timeout', type='int', default=0,
metavar='SECONDS', help='Maximum number of seconds allowed for '
'receiving the request body. This is the hard limit after '
'taking into consideration any increases to the basic timeout '
'due to the minimum rate for reading the request body which may be '
'specified. Defaults to 0 indicating there is no maximum.')
add_option('all', '--body-min-rate', type='int', default=500,
metavar='BYTES', help='The number of bytes required to be sent '
'as part of the request body to trigger a dynamic increase in '
'the timeout on receiving the request body. Each time this '
'number of bytes is received the timeout will be increased '
'by 1 second up to any maximum specified by the maximum body '
'timeout. Defaults to 500 bytes.')
add_option('all', '--server-backlog', type='int', default=500,
metavar='NUMBER', help='Depth of server socket listener '
'backlog for Apache child processes. Defaults to 500.')
add_option('unix', '--daemon-backlog', type='int', default=100,
metavar='NUMBER', help='Depth of server socket listener '
'backlog for daemon processes. Defaults to 100.')
add_option('unix', '--send-buffer-size', type='int', default=0,
metavar='NUMBER', help='Size of socket buffer for sending '
'data to daemon processes. Defaults to 0, indicating '
'the system default socket buffer size is used.')
add_option('unix', '--receive-buffer-size', type='int', default=0,
metavar='NUMBER', help='Size of socket buffer for receiving '
'data from daemon processes. Defaults to 0, indicating '
'the system default socket buffer size is used.')
add_option('unix', '--header-buffer-size', type='int', default=0,
metavar='NUMBER', help='Size of buffer used for reading '
'response headers from daemon processes. Defaults to 0, '
'indicating internal default of 32768 bytes is used.')
add_option('unix', '--response-buffer-size', type='int', default=0,
metavar='NUMBER', help='Maximum amount of response content '
'that will be allowed to be buffered in the Apache child '
'worker process when proxying the response from a daemon '
'process. Defaults to 0, indicating internal default of '
'65536 bytes is used.')
add_option('unix', '--response-socket-timeout', type='int', default=0,
metavar='SECONDS', help='Maximum number of seconds allowed '
'to pass before timing out on a write operation back to the '
'HTTP client when the response buffer has filled and data is '
'being forcibly flushed. Defaults to 0 seconds indicating that '
'it will default to the value of the \'socket-timeout\' option.')
add_option('all', '--enable-sendfile', action='store_true',
default=False, help='Flag indicating whether sendfile() support '
'should be enabled. Defaults to being disabled. This should '
'only be enabled if the operating system kernel and file system '
'type where files are hosted supports it.')
add_option('unix', '--disable-reloading', action='store_true',
default=False, help='Disables all reloading of daemon processes '
'due to changes to the file containing the WSGI application '
'entrypoint, or any other loaded source files. This has no '
'effect when embedded mode is used as reloading is automatically '
'disabled for embedded mode.')
add_option('unix', '--reload-on-changes', action='store_true',
default=False, help='Flag indicating whether worker processes '
'should be automatically restarted when any Python code file '
'loaded by the WSGI application has been modified. Defaults to '
'being disabled. When reloading on any code changes is disabled, '
'unless all reloading is also disabled, the worker processes '
'will still be reloaded if the file containing the WSGI '
'application entrypoint is modified.')
add_option('unix', '--user', default=default_run_user(),
metavar='USERNAME', help='When being run by the root user, '
'the user that the WSGI application should be run as.')
add_option('unix', '--group', default=default_run_group(),
metavar='GROUP', help='When being run by the root user, the '
'group that the WSGI application should be run as.')
add_option('all', '--callable-object', default='application',
metavar='NAME', help='The name of the entry point for the WSGI '
'application within the WSGI script file. Defaults to '
'the name \'application\'.')
add_option('all', '--map-head-to-get', default='Auto',
metavar='OFF|ON|AUTO', help='Flag indicating whether HEAD '
'requests should be mapped to a GET request. By default a HEAD '
'request will be automatically mapped to a GET request when an '
'Apache output filter is detected that may want to see the '
'entire response in order to set up response headers correctly '
'for a HEAD request. This can be disabled by setting this option to \'Off\'.')
add_option('all', '--document-root', metavar='DIRECTORY-PATH',
help='The directory which should be used as the document root '
'and which contains any static files.')
add_option('all', '--directory-index', metavar='FILE-NAME',
help='The name of a directory index resource to be found in the '
'document root directory. Requests mapping to the directory '
'will be mapped to this resource rather than being passed '
'through to the WSGI application.')
add_option('all', '--directory-listing', action='store_true',
default=False, help='Flag indicating if directory listing '
'should be enabled where static file application type is '
'being used and no directory index file has been specified.')
add_option('all', '--allow-override', metavar='DIRECTIVE-TYPE',
action='append', help='Allow directives to be overridden from a '
'\'.htaccess\' file. Defaults to \'None\', indicating that any '
'\'.htaccess\' file will be ignored with override directives '
'not being permitted.')
add_option('all', '--mount-point', metavar='URL-PATH', default='/',
help='The URL path at which the WSGI application will be '
'mounted. Defaults to being mounted at the root URL of the '
'site.')
add_option('all', '--url-alias', action='append', nargs=2,
dest='url_aliases', metavar='URL-PATH FILE-PATH|DIRECTORY-PATH',
help='Map a single static file or a directory of static files '
'to a sub URL.')
add_option('all', '--error-document', action='append', nargs=2,
dest='error_documents', metavar='STATUS URL-PATH', help='Map '
'a specific sub URL as the handler for HTTP errors generated '
'by the web server.')
add_option('all', '--error-override', action='store_true',
default=False, help='Flag indicating whether Apache error '
'documents will override application error responses.')
add_option('all', '--proxy-mount-point', action='append', nargs=2,
dest='proxy_mount_points', metavar='URL-PATH URL',
help='Map a sub URL such that any requests against it will be '
'proxied to the specified URL. This is only for proxying to a '
'site as a whole, or a sub site, not individual resources.')
add_option('all', '--proxy-url-alias', action='append', nargs=2,
dest='proxy_mount_points', metavar='URL-PATH URL',
help=optparse.SUPPRESS_HELP)
add_option('all', '--proxy-virtual-host', action='append', nargs=2,
dest='proxy_virtual_hosts', metavar='HOSTNAME URL',
help='Proxy any requests for the specified host name to the '
'remote URL.')
add_option('all', '--trust-proxy-header', action='append', default=[],
dest='trusted_proxy_headers', metavar='HEADER-NAME',
help='The name of any trusted HTTP header providing details '
'of the front end client request when proxying.')
add_option('all', '--trust-proxy', action='append', default=[],
dest='trusted_proxies', metavar='IP-ADDRESS/SUBNET',
help='The IP address or subnet corresponding to any trusted '
'proxy.')
add_option('all', '--keep-alive-timeout', type='int', default=2,
metavar='SECONDS', help='The number of seconds which a client '
'connection will be kept alive to allow subsequent requests '
'to be made over the same connection when a keep alive '
'connection is requested. Defaults to 2, indicating that keep '
'alive connections are set for 2 seconds.')
add_option('all', '--compress-responses', action='store_true',
default=False, help='Flag indicating whether responses for '
'common text based responses, such as plain text, HTML, XML, '
'CSS and Javascript should be compressed.')
add_option('all', '--server-metrics', action='store_true',
default=False, help='Flag indicating whether internal server '
'metrics will be available within the WSGI application. '
'Defaults to being disabled.')
add_option('all', '--server-status', action='store_true',
default=False, help='Flag indicating whether web server status '
'will be available at the /server-status sub URL. Defaults to '
'being disabled.')
add_option('all', '--host-access-script', metavar='SCRIPT-PATH',
default=None, help='Specify a Python script file for '
'performing host access checks.')
add_option('all', '--auth-user-script', metavar='SCRIPT-PATH',
default=None, help='Specify a Python script file for '
'performing user authentication.')
add_option('all', '--auth-type', metavar='TYPE',
default='Basic', help='Specify the type of authentication '
'scheme used when authenticating users. Defaults to using '
'\'Basic\'. Alternate schemes available are \'Digest\'.')
add_option('all', '--auth-group-script', metavar='SCRIPT-PATH',
default=None, help='Specify a Python script file for '
'performing group based authorization in conjunction with '
'a user authentication script.')
add_option('all', '--auth-group', metavar='NAME',
default='wsgi', help='Specify the group which users should '
'be a member of when using a group based authorization script. '
'Defaults to \'wsgi\' as a place holder but should be '
'overridden to be the actual group you use rather than '
'making your group name match the default.')
add_option('all', '--include-file', action='append',
dest='include_files', metavar='FILE-PATH', help='Specify the '
'path to an additional web server configuration file to be '
'included at the end of the generated web server configuration '
'file.')
add_option('all', '--rewrite-rules', metavar='FILE-PATH',
help='Specify an alternate server configuration file which '
'contains rewrite rules. Defaults to using the '
'\'rewrite.conf\' stored under the server root directory.')
add_option('unix', '--envvars-script', metavar='FILE-PATH',
help='Specify an alternate script file for user defined web '
'server environment variables. Defaults to using the '
'\'envvars\' stored under the server root directory.')
add_option('unix', '--lang', default=None, metavar='NAME',
help=optparse.SUPPRESS_HELP)
add_option('all', '--locale', default=None, metavar='NAME',
help='Specify the natural language locale for the process '
'as normally defined by the \'LC_ALL\' environment variable. '
'If not specified, then the default locale for this process '
'will be used. If the default locale is however \'C\' or '
'\'POSIX\' then an attempt will be made to use either the '
'\'en_US.UTF-8\' or \'C.UTF-8\' locales and if that is not '
'possible only then fallback to the default locale of this '
'process.')
add_option('all', '--setenv', action='append', nargs=2,
dest='setenv_variables', metavar='KEY VALUE', help='Specify '
'a name/value pair to be added to the per request WSGI environ '
'dictionary.')
add_option('all', '--passenv', action='append',
dest='passenv_variables', metavar='KEY', help='Specify the '
'names of any process level environment variables which should '
'be passed as a name/value pair in the per request WSGI '
'environ dictionary.')
add_option('all', '--working-directory', metavar='DIRECTORY-PATH',
help='Specify the directory which should be used as the '
'current working directory of the WSGI application. This '
'directory will be searched when importing Python modules '
'so long as the WSGI application doesn\'t subsequently '
'change the current working directory. Defaults to the '
'directory this script is run from.')
add_option('all', '--pid-file', metavar='FILE-PATH',
help='Specify an alternate file to be used to store the '
'process ID for the root process of the web server.')
add_option('all', '--server-root', metavar='DIRECTORY-PATH',
help='Specify an alternate directory for where the generated '
'web server configuration, startup files and logs will be '
'stored. On Linux defaults to the sub directory specified by '
'the TMPDIR environment variable, or /tmp if not specified. '
'On macOS, defaults to the /var/tmp directory.')
add_option('unix', '--server-mpm', action='append',
dest='server_mpm_variables', metavar='NAME', help='Specify '
'preferred MPM to use when using Apache 2.4 with dynamically '
'loadable MPMs and more than one is available. By default '
'the MPM precedence order when no preference is given is '
'\"event\", \"worker" and \"prefork\".')
add_option('all', '--log-directory', metavar='DIRECTORY-PATH',
help='Specify an alternate directory for where the log files '
'will be stored. Defaults to the server root directory.')
add_option('all', '--log-level', default='warn', metavar='NAME',
help='Specify the log level for logging. Defaults to \'warn\'.')
add_option('all', '--access-log', action='store_true', default=False,
help='Flag indicating whether the web server access log '
'should be enabled. Defaults to being disabled.')
add_option('unix', '--startup-log', action='store_true', default=False,
help='Flag indicating whether the web server startup log should '
'be enabled. Defaults to being disabled.')
add_option('all', '--verbose-debugging', action='store_true',
dest='verbose_debugging', help=optparse.SUPPRESS_HELP)
add_option('unix', '--log-to-terminal', action='store_true',
default=False, help='Flag indicating whether logs should '
'be directed back to the terminal. Defaults to being disabled. '
'If --log-directory is set explicitly, it will override this '
'option. If logging to the terminal is carried out, any '
'rotating of log files will be disabled.')
add_option('all', '--access-log-format', metavar='FORMAT',
help='Specify the format of the access log records.'),
add_option('all', '--error-log-format', metavar='FORMAT',
help='Specify the format of the error log records.'),
add_option('all', '--error-log-name', metavar='FILE-NAME',
default='error_log', help='Specify the name of the error '
'log file when it is being written to the log directory.'),
add_option('all', '--access-log-name', metavar='FILE-NAME',
default='access_log', help='Specify the name of the access '
'log file when it is being written to the log directory.'),
add_option('unix', '--startup-log-name', metavar='FILE-NAME',
default='startup_log', help='Specify the name of the startup '
'log file when it is being written to the log directory.'),
add_option('unix', '--rotate-logs', action='store_true', default=False,
help='Flag indicating whether log rotation should be performed.'),
add_option('unix', '--max-log-size', default=5, type='int',
metavar='MB', help='The maximum size in MB the log file should '
'be allowed to reach before log file rotation is performed.'),
add_option('unix', '--rotatelogs-executable',
default=apxs_config.ROTATELOGS, metavar='FILE-PATH',
help='Override the path to the rotatelogs executable.'),
add_option('all', '--python-path', action='append',
dest='python_paths', metavar='DIRECTORY-PATH', help='Specify '
'the path to any additional directory that should be added to '
'the Python module search path. Note that these directories will '
'not be processed for \'.pth\' files. If processing of \'.pth\' '
'files is required, set the \'PYTHONPATH\' environment variable '
'in a script specified by the \'--envvars-script\' option.')
add_option('all', '--python-eggs', metavar='DIRECTORY-PATH',
help='Specify an alternate directory which should be used for '
'unpacking of Python eggs. Defaults to a sub directory of '
'the server root directory.')
add_option('unix', '--shell-executable', default=SHELL,
metavar='FILE-PATH', help='Override the path to the shell '
'used in the \'apachectl\' script. The \'bash\' shell will '
'be used if available.')
add_option('unix', '--httpd-executable', default=apxs_config.HTTPD,
metavar='FILE-PATH', help='Override the path to the Apache web '
'server executable.')
add_option('unix', '--process-name', metavar='NAME', help='Override '
'the name given to the Apache parent process. This might be '
'needed when a process manager expects the process to be named '
'a certain way but, due to a sequence of exec calls, the name '
'has changed.')
add_option('all', '--modules-directory', default=apxs_config.LIBEXECDIR,
metavar='DIRECTORY-PATH', help='Override the path to the Apache '
'web server modules directory.')
add_option('unix', '--mime-types', default=find_mimetypes(),
metavar='FILE-PATH', help='Override the path to the mime types '
'file used by the web server.')
add_option('unix', '--socket-prefix', metavar='DIRECTORY-PATH',
help='Specify an alternate directory name prefix to be used '
'for the UNIX domain sockets used by mod_wsgi to communicate '
'between the Apache child processes and the daemon processes.')
add_option('all', '--add-handler', action='append', nargs=2,
dest='handler_scripts', metavar='EXTENSION SCRIPT-PATH',
help='Specify a WSGI application to be used as a special '
'handler for any resources matched from the document root '
'directory with a specific extension type.')
add_option('all', '--chunked-request', action='store_true',
default=False, help='Flag indicating whether requests which '
'use chunked transfer encoding will be accepted.')
add_option('hidden', '--with-newrelic', action='store_true',
default=False, help='Flag indicating whether all New Relic '
'performance monitoring features should be enabled.')
add_option('hidden', '--with-newrelic-agent', action='store_true',
default=False, help='Flag indicating whether the New Relic '
'Python agent should be enabled for reporting application server '
'metrics.')
add_option('hidden', '--with-newrelic-platform', action='store_true',
default=False, help='Flag indicating whether the New Relic '
'platform plugin should be enabled for reporting server level '
'metrics.')
add_option('hidden', '--newrelic-config-file', metavar='FILE-PATH',
default='', help='Specify the location of the New Relic agent '
'configuration file.')
add_option('hidden', '--newrelic-environment', metavar='NAME',
default='', help='Specify the name of the environment section '
'that should be used from New Relic agent configuration file.')
add_option('hidden', '--with-php5', action='store_true', default=False,
help='Flag indicating whether PHP 5 support should be enabled. '
'PHP code files must use the \'.php\' extension.')
add_option('all', '--with-cgi', action='store_true', default=False,
help='Flag indicating whether CGI script support should be '
'enabled. CGI scripts must use the \'.cgi\' extension and be '
'executable.')
add_option('unix', '--service-script', action='append', nargs=2,
dest='service_scripts', metavar='SERVICE SCRIPT-PATH',
help='Specify the name of a Python script to be loaded and '
'executed in the context of a distinct daemon process. Used '
'for running a managed service.')
add_option('unix', '--service-user', action='append', nargs=2,
dest='service_users', metavar='SERVICE USERNAME',
help='When being run by the root user, the user that the '
'distinct daemon process started to run the managed service '
'should be run as.')
add_option('unix', '--service-group', action='append', nargs=2,
dest='service_groups', metavar='SERVICE GROUP',
help='When being run by the root user, the group that the '
'distinct daemon process started to run the managed service '
'should be run as.')
add_option('unix', '--service-log-file', action='append', nargs=2,
dest='service_log_files', metavar='SERVICE FILE-NAME',
help='Specify the name of a separate log file to be used for '
'the managed service.')
add_option('unix', '--embedded-mode', action='store_true', default=False,
help='Flag indicating whether to run in embedded mode rather '
'than the default daemon mode. Numerous daemon mode specific '
'features will not operate when this mode is used.')
add_option('all', '--enable-docs', action='store_true', default=False,
help='Flag indicating whether the mod_wsgi documentation should '
'be made available at the /__wsgi__/docs sub URL.')
add_option('unix', '--debug-mode', action='store_true', default=False,
help='Flag indicating whether to run in single process mode '
'to allow the running of an interactive Python debugger. This '
'will override all options related to processes, threads and '
'communication with workers. All forms of source code reloading '
'will also be disabled. Both stdin and stdout will be attached '
'to the console to allow interaction with the Python debugger.')
add_option('unix', '--enable-debugger', action='store_true',
default=False, help='Flag indicating whether post mortem '
'debugging of any exceptions which propagate out from the '
'WSGI application when running in debug mode should be '
'performed. Post mortem debugging is performed using the '
'Python debugger (pdb).'),
add_option('unix', '--debugger-startup', action='store_true',
default=False, help='Flag indicating whether when post '
'mortem debugging is enabled, that the debugger should '
'also be thrown into the interactive console on initial '
'startup of the server to allow breakpoints to be set up.'),
add_option('unix', '--enable-coverage', action='store_true',
default=False, help='Flag indicating whether coverage analysis '
'is enabled when running in debug mode.')
add_option('unix', '--coverage-directory', metavar='DIRECTORY-PATH',
default='', help='Override the path to the directory into '
'which coverage analysis will be generated when enabled under '
'debug mode.')
add_option('unix', '--enable-profiler', action='store_true',
default=False, help='Flag indicating whether code profiling '
'is enabled when running in debug mode.')
add_option('unix', '--profiler-directory', metavar='DIRECTORY-PATH',
default='', help='Override the path to the directory into '
'which profiler data will be written when enabled under debug '
'mode.')
add_option('unix', '--enable-recorder', action='store_true',
default=False, help='Flag indicating whether recording of '
'requests is enabled when running in debug mode.')
add_option('unix', '--recorder-directory', metavar='DIRECTORY-PATH',
default='', help='Override the path to the directory into '
'which recorder data will be written when enabled under debug '
'mode.')
add_option('unix', '--enable-gdb', action='store_true',
default=False, help='Flag indicating whether Apache should '
'be run under \'gdb\' when running in debug mode. This '
'would be used to debug process crashes.')
add_option('unix', '--gdb-executable', default='gdb',
metavar='FILE-PATH', help='Override the path to the gdb '
'executable.')
add_option('unix', '--setup-only', action='store_true', default=False,
help='Flag indicating that after the configuration files have '
'been setup, that the command should then exit and not go on '
'to actually run up the Apache server. This is to allow for '
'the generation of the configuration with Apache then later '
'being started separately using the generated \'apachectl\' '
'script.')
# add_option('unix', '--isatty', action='store_true', default=False,
# help='Flag indicating whether should assume being run in an '
# 'interactive terminal session. In this case Apache will not '
# 'replace this wrapper script, but will be run as a sub process.'
# 'Signals such as SIGINT, SIGTERM, SIGHUP and SIGUSR1 will be '
# 'forwarded onto Apache, but SIGWINCH will be blocked so that '
# 'resizing of a terminal session window will not cause Apache '
# 'to shutdown. This is a separate option at this time rather '
# 'than being determined automatically while the reliability of '
# 'intercepting and forwarding signals is verified.')
def cmd_setup_server(params):
formatter = optparse.IndentedHelpFormatter()
formatter.set_long_opt_delimiter(' ')
usage = '%prog setup-server script [options]'
parser = optparse.OptionParser(usage=usage, option_list=option_list,
formatter=formatter)
(options, args) = parser.parse_args(params)
_cmd_setup_server('setup-server', args, vars(options))
def _mpm_module_defines(modules_directory, preferred=None):
if os.name == 'nt':
return ['-DMOD_WSGI_MPM_ENABLE_WINNT_MODULE']
result = []
workers = ['event', 'worker', 'prefork']
found = False
for name in workers:
if not preferred or name in preferred:
if os.path.exists(os.path.join(modules_directory,
'mod_mpm_%s.so' % name)):
if not found:
result.append('-DMOD_WSGI_MPM_ENABLE_%s_MODULE' % name.upper())
found = True
result.append('-DMOD_WSGI_MPM_EXISTS_%s_MODULE' % name.upper())
return result
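# Illustrative sketch (the modules directory path is an assumption): for an
# Apache installation that ships mod_mpm_event.so and mod_mpm_worker.so,
# _mpm_module_defines('/usr/lib/apache2/modules') would return roughly:
#
#   ['-DMOD_WSGI_MPM_ENABLE_EVENT_MODULE',
#    '-DMOD_WSGI_MPM_EXISTS_EVENT_MODULE',
#    '-DMOD_WSGI_MPM_EXISTS_WORKER_MODULE']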
def _cmd_setup_server(command, args, options):
options['sys_argv'] = repr(sys.argv)
options['mod_wsgi_so'] = where()
options['working_directory'] = options['working_directory'] or os.getcwd()
options['working_directory'] = os.path.abspath(options['working_directory'])
if not options['host']:
options['listener_host'] = None
options['host'] = 'localhost'
else:
options['listener_host'] = options['host']
if os.name == 'nt':
options['daemon_name'] = '(wsgi:%s:%s:%s)' % (options['host'],
options['port'], getpass.getuser())
else:
options['daemon_name'] = '(wsgi:%s:%s:%s)' % (options['host'],
options['port'], os.getuid())
if not options['server_root']:
if os.name == 'nt':
tmpdir = tempfile.gettempdir()
elif sys.platform == 'darwin':
tmpdir = '/var/tmp'
else:
tmpdir = os.environ.get('TMPDIR')
tmpdir = tmpdir or '/tmp'
tmpdir = tmpdir.rstrip('/')
if os.name == 'nt':
options['server_root'] = ('%s/mod_wsgi-%s-%s-%s' % (tmpdir,
options['host'], options['port'], getpass.getuser())
).replace('\\','/')
else:
options['server_root'] = '%s/mod_wsgi-%s:%s:%s' % (tmpdir,
options['host'], options['port'], os.getuid())
if not os.path.isdir(options['server_root']):
os.mkdir(options['server_root'])
if options['ssl_certificate_file']:
options['ssl_certificate_file'] = os.path.abspath(
options['ssl_certificate_file'])
if options['ssl_certificate_key_file']:
options['ssl_certificate_key_file'] = os.path.abspath(
options['ssl_certificate_key_file'])
if options['ssl_certificate']:
options['ssl_certificate'] = os.path.abspath(
options['ssl_certificate'])
options['ssl_certificate_file'] = options['ssl_certificate']
options['ssl_certificate_file'] += '.crt'
options['ssl_certificate_key_file'] = options['ssl_certificate']
options['ssl_certificate_key_file'] += '.key'
if options['ssl_ca_certificate_file']:
options['ssl_ca_certificate_file'] = os.path.abspath(
options['ssl_ca_certificate_file'])
if options['ssl_certificate_chain_file']:
options['ssl_certificate_chain_file'] = os.path.abspath(
options['ssl_certificate_chain_file'])
if options['entry_point']:
args = [options['entry_point']]
if not args:
if options['application_type'] != 'static':
options['entry_point'] = posixpath.join(
options['server_root'], 'default.wsgi')
options['application_type'] = 'script'
options['enable_docs'] = True
else:
if not options['document_root']:
options['document_root'] = os.getcwd()
options['entry_point'] = '(static)'
else:
if options['application_type'] in ('script', 'paste'):
options['entry_point'] = posixpath.abspath(args[0])
elif options['application_type'] == 'static':
if not options['document_root']:
options['document_root'] = posixpath.abspath(args[0])
options['entry_point'] = 'ignored'
else:
options['entry_point'] = 'overridden'
else:
options['entry_point'] = args[0]
if options['host_access_script']:
options['host_access_script'] = posixpath.abspath(
options['host_access_script'])
if options['auth_user_script']:
options['auth_user_script'] = posixpath.abspath(
options['auth_user_script'])
if options['auth_group_script']:
options['auth_group_script'] = posixpath.abspath(
options['auth_group_script'])
options['documentation_directory'] = os.path.join(os.path.dirname(
os.path.dirname(__file__)), 'docs')
options['images_directory'] = os.path.join(os.path.dirname(
os.path.dirname(__file__)), 'images')
if os.path.exists(posixpath.join(options['documentation_directory'],
'index.html')):
options['documentation_url'] = '/__wsgi__/docs/'
else:
options['documentation_url'] = 'http://www.modwsgi.org/'
if not os.path.isabs(options['server_root']):
options['server_root'] = posixpath.abspath(options['server_root'])
if not options['document_root']:
options['document_root'] = posixpath.join(options['server_root'],
'htdocs')
try:
os.mkdir(options['document_root'])
except Exception:
pass
if not options['allow_override']:
options['allow_override'] = 'None'
else:
options['allow_override'] = ' '.join(options['allow_override'])
if not options['mount_point'].startswith('/'):
options['mount_point'] = posixpath.normpath('/' + options['mount_point'])
    # Create subdirectories for mount points in the document directory
    # so that the fallback resource rewrite rule will work.
if options['mount_point'] != '/':
parts = options['mount_point'].rstrip('/').split('/')[1:]
subdir = options['document_root']
try:
for part in parts:
subdir = posixpath.join(subdir, part)
if not os.path.exists(subdir):
os.mkdir(subdir)
except Exception:
raise
if not os.path.isabs(options['document_root']):
options['document_root'] = posixpath.abspath(options['document_root'])
if not options['log_directory']:
options['log_directory'] = options['server_root']
else:
# The --log-directory option overrides --log-to-terminal.
options['log_to_terminal'] = False
if options['log_to_terminal']:
# The --log-to-terminal option overrides --rotate-logs.
options['rotate_logs'] = False
try:
os.mkdir(options['log_directory'])
except Exception:
pass
if not os.path.isabs(options['log_directory']):
options['log_directory'] = posixpath.abspath(options['log_directory'])
if not options['log_to_terminal']:
options['error_log_file'] = posixpath.join(options['log_directory'],
options['error_log_name'])
else:
if os.name == 'nt':
options['error_log_file'] = 'CON'
else:
try:
with open('/dev/stderr', 'w'):
pass
except IOError:
options['error_log_file'] = '|%s' % find_program(
['tee'], default='tee')
else:
options['error_log_file'] = '/dev/stderr'
if not options['log_to_terminal']:
options['access_log_file'] = posixpath.join(
options['log_directory'], options['access_log_name'])
else:
try:
with open('/dev/stdout', 'w'):
pass
except IOError:
options['access_log_file'] = '|%s' % find_program(
['tee'], default='tee')
else:
options['access_log_file'] = '/dev/stdout'
if options['access_log_format']:
if options['access_log_format'] in ('common', 'combined'):
options['log_format_nickname'] = options['access_log_format']
options['access_log_format'] = 'undefined'
else:
options['log_format_nickname'] = 'custom'
else:
options['log_format_nickname'] = 'common'
options['access_log_format'] = 'undefined'
options['access_log_format'] = options['access_log_format'].replace(
'\"', '\\"')
if options['error_log_format']:
options['error_log_format'] = options['error_log_format'].replace(
'\"', '\\"')
options['pid_file'] = ((options['pid_file'] and posixpath.abspath(
options['pid_file'])) or posixpath.join(options['server_root'],
'httpd.pid'))
options['python_eggs'] = (posixpath.abspath(options['python_eggs']) if
options['python_eggs'] is not None else None)
if options['python_eggs'] is None:
options['python_eggs'] = posixpath.join(options['server_root'],
'python-eggs')
try:
os.mkdir(options['python_eggs'])
if os.name != 'nt' and os.getuid() == 0:
import pwd
import grp
os.chown(options['python_eggs'],
pwd.getpwnam(options['user']).pw_uid,
grp.getgrnam(options['group']).gr_gid)
except Exception:
pass
if options['python_paths'] is None:
options['python_paths'] = []
if options['debug_mode'] or options['embedded_mode']:
if options['working_directory'] not in options['python_paths']:
options['python_paths'].insert(0, options['working_directory'])
if options['debug_mode']:
options['server_mpm_variables'] = ['worker', 'prefork']
elif options['embedded_mode']:
if not options['server_mpm_variables']:
options['server_mpm_variables'] = ['worker', 'prefork']
    # Special case to check for when being executed from the shiv variant
    # of a zipapp application bundle. We need to work out where the
    # site packages directory is and add it to the Python module search
    # path so it is known to the Apache sub process when executed.
site_packages = []
if '_bootstrap' in sys.modules:
bootstrap = sys.modules['_bootstrap']
if 'bootstrap' in dir(bootstrap):
frame = inspect.currentframe()
while frame is not None:
code = frame.f_code
if (code and code.co_filename == bootstrap.__file__ and
code.co_name == 'bootstrap' and
'site_packages' in frame.f_locals):
site_packages.append(str(frame.f_locals['site_packages']))
break
frame = frame.f_back
options['python_paths'].extend(site_packages)
options['python_path'] = ':'.join(options['python_paths'])
options['multiprocess'] = options['processes'] is not None
options['processes'] = options['processes'] or 1
options['python_home'] = sys.prefix.replace('\\','/')
options['keep_alive'] = options['keep_alive_timeout'] != 0
request_read_timeout = ''
if options['header_timeout'] > 0:
request_read_timeout += 'header=%d' % options['header_timeout']
if options['header_max_timeout'] > 0:
request_read_timeout += '-%d' % options['header_max_timeout']
if options['header_min_rate'] > 0:
request_read_timeout += ',MinRate=%d' % options['header_min_rate']
if options['body_timeout'] > 0:
request_read_timeout += ' body=%d' % options['body_timeout']
if options['body_max_timeout'] > 0:
request_read_timeout += '-%d' % options['body_max_timeout']
if options['body_min_rate'] > 0:
request_read_timeout += ',MinRate=%d' % options['body_min_rate']
options['request_read_timeout'] = request_read_timeout
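    # Illustrative sketch (values are assumptions, not taken from this file):
    # with --header-timeout 15 --header-max-timeout 30 --body-timeout 15 the
    # string built above becomes 'header=15-30 body=15', which is presumably
    # consumed by Apache's mod_reqtimeout RequestReadTimeout directive in the
    # generated httpd.conf.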
if options['server_metrics']:
options['server_metrics_flag'] = 'On'
else:
options['server_metrics_flag'] = 'Off'
if options['handler_scripts']:
handler_scripts = []
for extension, script in options['handler_scripts']:
if not os.path.isabs(script):
script = posixpath.abspath(script)
handler_scripts.append((extension, script))
options['handler_scripts'] = handler_scripts
if options['newrelic_config_file']:
options['newrelic_config_file'] = posixpath.abspath(
options['newrelic_config_file'])
if options['with_newrelic']:
options['with_newrelic_agent'] = True
options['with_newrelic_platform'] = True
if options['with_newrelic_platform']:
options['server_metrics'] = True
if options['service_scripts']:
service_scripts = []
for name, script in options['service_scripts']:
if not os.path.isabs(script):
script = posixpath.abspath(script)
service_scripts.append((name, script))
options['service_scripts'] = service_scripts
    # Note that all the calculations below are overridden when using
    # embedded mode.
max_clients = options['processes'] * options['threads']
if options['max_clients'] is not None:
max_clients = max(options['max_clients'], max_clients)
else:
max_clients = 10 + max(10, int(1.5 * max_clients))
initial_workers = options['initial_workers']
min_spare_workers = options['minimum_spare_workers']
max_spare_workers = options['maximum_spare_workers']
if initial_workers is None:
prefork_initial_workers = 0.05
else:
prefork_initial_workers = initial_workers
if min_spare_workers is None:
prefork_min_spare_workers = prefork_initial_workers
else:
prefork_min_spare_workers = min_spare_workers
if max_spare_workers is None:
prefork_max_spare_workers = 0.1
else:
prefork_max_spare_workers = max_spare_workers
options['prefork_max_clients'] = max_clients
options['prefork_server_limit'] = max_clients
options['prefork_start_servers'] = max(1, int(
prefork_initial_workers * max_clients))
options['prefork_min_spare_servers'] = max(1, int(
prefork_min_spare_workers * max_clients))
options['prefork_max_spare_servers'] = max(1, int(
prefork_max_spare_workers * max_clients))
if initial_workers is None:
worker_initial_workers = 0.2
else:
worker_initial_workers = initial_workers
if min_spare_workers is None:
worker_min_spare_workers = worker_initial_workers
else:
worker_min_spare_workers = min_spare_workers
if max_spare_workers is None:
worker_max_spare_workers = 0.6
else:
worker_max_spare_workers = max_spare_workers
options['worker_max_clients'] = max_clients
if max_clients > 20:
options['worker_threads_per_child'] = int(max_clients /
(int(max_clients / 20) + 1))
else:
options['worker_threads_per_child'] = 10
options['worker_thread_limit'] = options['worker_threads_per_child']
count = max_clients / options['worker_threads_per_child']
options['worker_server_limit'] = int(math.floor(count))
if options['worker_server_limit'] != count:
options['worker_server_limit'] += 1
options['worker_max_clients'] = (options['worker_server_limit'] *
options['worker_threads_per_child'])
options['worker_start_servers'] = max(1,
int(worker_initial_workers * options['worker_server_limit']))
options['worker_min_spare_threads'] = max(
options['worker_threads_per_child'],
int(worker_min_spare_workers * options['worker_server_limit']) *
options['worker_threads_per_child'])
options['worker_max_spare_threads'] = max(
options['worker_threads_per_child'],
int(worker_max_spare_workers * options['worker_server_limit']) *
options['worker_threads_per_child'])
if options['embedded_mode']:
max_clients = options['processes'] * options['threads']
options['prefork_max_clients'] = max_clients
options['prefork_server_limit'] = max_clients
options['prefork_start_servers'] = max_clients
options['prefork_min_spare_servers'] = max_clients
options['prefork_max_spare_servers'] = max_clients
options['worker_max_clients'] = max_clients
options['worker_server_limit'] = options['processes']
options['worker_thread_limit'] = options['threads']
options['worker_threads_per_child'] = options['threads']
options['worker_start_servers'] = options['processes']
options['worker_min_spare_threads'] = max_clients
options['worker_max_spare_threads'] = max_clients
options['httpd_conf'] = posixpath.join(options['server_root'], 'httpd.conf')
options['httpd_executable'] = os.environ.get('HTTPD',
options['httpd_executable'])
if os.name != 'nt':
if not os.path.isabs(options['httpd_executable']):
options['httpd_executable'] = find_program(
[options['httpd_executable']], 'httpd', ['/usr/sbin'])
if not options['process_name']:
options['process_name'] = posixpath.basename(
options['httpd_executable']) + ' (mod_wsgi-express)'
options['process_name'] = options['process_name'].ljust(
len(options['daemon_name']))
options['rewrite_rules'] = (posixpath.abspath(
options['rewrite_rules']) if options['rewrite_rules'] is
not None else None)
options['envvars_script'] = (posixpath.abspath(
options['envvars_script']) if options['envvars_script'] is
not None else None)
if options['locale'] is None:
options['locale'] = options['lang']
if options['locale'] is None:
language, encoding = locale.getdefaultlocale()
if language is None:
language = 'C'
if encoding is None:
options['locale'] = locale.normalize(language)
else:
options['locale'] = locale.normalize(language + '.' + encoding)
if options['locale'].upper() in ('C', 'POSIX'):
oldlocale = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
options['locale'] = 'en_US.UTF-8'
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, 'C.UTF-8')
options['locale'] = 'C.UTF-8'
except locale.Error:
pass
locale.setlocale(locale.LC_ALL, oldlocale)
options['lang'] = options['locale']
options['httpd_arguments_list'] = []
options['trusted_proxy_headers'] = ' '.join(
options['trusted_proxy_headers'])
options['trusted_proxies'] = ' '.join(options['trusted_proxies'])
if options['startup_log']:
if not options['log_to_terminal']:
options['startup_log_file'] = posixpath.join(
options['log_directory'], options['startup_log_name'])
else:
if os.name == 'nt':
options['startup_log_file'] = 'CON'
else:
try:
with open('/dev/stderr', 'w'):
pass
except IOError:
try:
with open('/dev/tty', 'w'):
pass
except IOError:
options['startup_log_file'] = None
else:
options['startup_log_file'] = '/dev/tty'
else:
options['startup_log_file'] = '/dev/stderr'
if options['startup_log_file']:
options['httpd_arguments_list'].append('-E')
options['httpd_arguments_list'].append(options['startup_log_file'])
if options['verbose_debugging']:
options['verbose_debugging_flag'] = 'On'
else:
options['verbose_debugging_flag'] = 'Off'
if options['server_name']:
host = options['server_name']
else:
host = options['host']
options['server_host'] = host
if options['port'] == 80:
options['url'] = 'http://%s/' % host
else:
options['url'] = 'http://%s:%s/' % (host, options['port'])
if options['https_port'] == 443:
options['https_url'] = 'https://%s/' % host
elif options['https_port'] is not None:
options['https_url'] = 'https://%s:%s/' % (host, options['https_port'])
else:
options['https_url'] = None
if options['embedded_mode']:
options['httpd_arguments_list'].append('-DEMBEDDED_MODE')
options['disable_reloading'] = True
if any((options['enable_debugger'], options['enable_coverage'],
options['enable_profiler'], options['enable_recorder'],
options['enable_gdb'])):
options['debug_mode'] = True
if options['debug_mode']:
options['httpd_arguments_list'].append('-DONE_PROCESS')
if options['debug_mode']:
if options['enable_coverage']:
if not options['coverage_directory']:
options['coverage_directory'] = posixpath.join(
options['server_root'], 'htmlcov')
else:
options['coverage_directory'] = posixpath.abspath(
options['coverage_directory'])
try:
os.mkdir(options['coverage_directory'])
except Exception:
pass
if options['enable_profiler']:
if not options['profiler_directory']:
options['profiler_directory'] = posixpath.join(
options['server_root'], 'pstats')
else:
options['profiler_directory'] = posixpath.abspath(
options['profiler_directory'])
try:
os.mkdir(options['profiler_directory'])
except Exception:
pass
if options['enable_recorder']:
if not options['recorder_directory']:
options['recorder_directory'] = posixpath.join(
options['server_root'], 'archive')
else:
options['recorder_directory'] = posixpath.abspath(
options['recorder_directory'])
try:
os.mkdir(options['recorder_directory'])
except Exception:
pass
else:
options['enable_debugger'] = False
options['enable_coverage'] = False
options['enable_profiler'] = False
options['enable_recorder'] = False
options['enable_gdb'] = False
options['parent_domain'] = 'unspecified'
if options['server_name']:
options['httpd_arguments_list'].append('-DMOD_WSGI_VIRTUAL_HOST')
if options['server_name'].lower().startswith('www.'):
options['httpd_arguments_list'].append('-DMOD_WSGI_REDIRECT_WWW')
options['parent_domain'] = options['server_name'][4:]
if options['http2']:
options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_HTTP2')
if (options['https_port'] and options['ssl_certificate_file'] and
options['ssl_certificate_key_file']):
options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_HTTPS')
if options['ssl_ca_certificate_file']:
options['httpd_arguments_list'].append('-DMOD_WSGI_VERIFY_CLIENT')
if options['ssl_certificate_chain_file']:
options['httpd_arguments_list'].append('-DMOD_WSGI_CERTIFICATE_CHAIN')
if options['ssl_environment']:
options['httpd_arguments_list'].append('-DMOD_WSGI_SSL_ENVIRONMENT')
if options['https_only']:
options['httpd_arguments_list'].append('-DMOD_WSGI_HTTPS_ONLY')
if options['hsts_policy']:
options['httpd_arguments_list'].append('-DMOD_WSGI_HSTS_POLICY')
if options['server_aliases']:
options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_ALIAS')
options['server_aliases'] = ' '.join(options['server_aliases'])
if options['allow_localhost']:
options['httpd_arguments_list'].append('-DMOD_WSGI_ALLOW_LOCALHOST')
if options['application_type'] == 'static':
options['httpd_arguments_list'].append('-DMOD_WSGI_STATIC_ONLY')
if options['enable_sendfile']:
options['httpd_arguments_list'].append('-DMOD_WSGI_ENABLE_SENDFILE')
if options['server_metrics']:
options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_METRICS')
if options['server_status']:
options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_METRICS')
options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_STATUS')
if options['directory_index']:
options['httpd_arguments_list'].append('-DMOD_WSGI_DIRECTORY_INDEX')
if options['directory_listing']:
options['httpd_arguments_list'].append('-DMOD_WSGI_DIRECTORY_LISTING')
if options['error_log_format']:
options['httpd_arguments_list'].append('-DMOD_WSGI_ERROR_LOG_FORMAT')
if options['access_log']:
options['httpd_arguments_list'].append('-DMOD_WSGI_ACCESS_LOG')
if options['rotate_logs']:
options['httpd_arguments_list'].append('-DMOD_WSGI_ROTATE_LOGS')
if options['keep_alive'] != 0:
options['httpd_arguments_list'].append('-DMOD_WSGI_KEEP_ALIVE')
if options['compress_responses'] != 0:
options['httpd_arguments_list'].append('-DMOD_WSGI_COMPRESS_RESPONSES')
if options['multiprocess']:
options['httpd_arguments_list'].append('-DMOD_WSGI_MULTIPROCESS')
if options['listener_host']:
options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_LISTENER_HOST')
if options['error_override']:
options['httpd_arguments_list'].append('-DMOD_WSGI_ERROR_OVERRIDE')
if options['host_access_script']:
options['httpd_arguments_list'].append('-DMOD_WSGI_HOST_ACCESS')
if options['auth_user_script']:
options['httpd_arguments_list'].append('-DMOD_WSGI_AUTH_USER')
if options['auth_group_script']:
options['httpd_arguments_list'].append('-DMOD_WSGI_AUTH_GROUP')
if options['chunked_request']:
options['httpd_arguments_list'].append('-DMOD_WSGI_CHUNKED_REQUEST')
if options['with_php5']:
options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PHP5')
if options['proxy_mount_points'] or options['proxy_virtual_hosts']:
options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PROXY')
if options['trusted_proxy_headers']:
options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PROXY_HEADERS')
if options['trusted_proxies']:
options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_TRUSTED_PROXIES')
if options['python_path']:
options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PYTHON_PATH')
if options['socket_prefix']:
options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_SOCKET_PREFIX')
if options['disable_reloading']:
options['httpd_arguments_list'].append('-DMOD_WSGI_DISABLE_RELOADING')
if options['with_cgi']:
if os.path.exists(posixpath.join(options['modules_directory'],
'mod_cgid.so')):
options['httpd_arguments_list'].append('-DMOD_WSGI_CGID_SCRIPT')
else:
options['httpd_arguments_list'].append('-DMOD_WSGI_CGI_SCRIPT')
options['httpd_arguments_list'].extend(
_mpm_module_defines(options['modules_directory'],
options['server_mpm_variables']))
options['python_executable'] = sys.executable
options['shlibpath_var'] = apxs_config.SHLIBPATH_VAR
options['shlibpath'] = apxs_config.SHLIBPATH
if _py_dylib:
options['httpd_arguments_list'].append('-DMOD_WSGI_LOAD_PYTHON_DYLIB')
options['python_dylib'] = _py_dylib
options['httpd_arguments'] = '-f %s %s' % (options['httpd_conf'],
' '.join(options['httpd_arguments_list']))
generate_wsgi_handler_script(options)
if options['with_newrelic_platform']:
generate_server_metrics_script(options)
print('Server URL :', options['url'])
if options['https_url']:
print('Server URL (HTTPS) :', options['https_url'])
if options['server_status']:
print('Server Status :', '%sserver-status' % options['url'])
print('Server Root :', options['server_root'])
print('Server Conf :', options['httpd_conf'])
print('Error Log File : %s (%s)' % (options['error_log_file'],
options['log_level']))
if options['access_log']:
print('Access Log File :', options['access_log_file'])
if options['startup_log']:
print('Startup Log File :', options['startup_log_file'])
if options['enable_coverage']:
print('Coverage Output :', posixpath.join(
options['coverage_directory'], 'index.html'))
if options['enable_profiler']:
print('Profiler Output :', options['profiler_directory'])
if options['enable_recorder']:
print('Recorder Output :', options['recorder_directory'])
if options['rewrite_rules']:
print('Rewrite Rules :', options['rewrite_rules'])
if os.name != 'nt':
if options['envvars_script']:
print('Environ Variables :', options['envvars_script'])
if command == 'setup-server' or options['setup_only']:
if not options['rewrite_rules']:
print('Rewrite Rules :', options['server_root'] + '/rewrite.conf')
if os.name != 'nt':
if not options['envvars_script']:
print('Environ Variables :', options['server_root'] + '/envvars')
print('Control Script :', options['server_root'] + '/apachectl')
if options['debug_mode']:
print('Operating Mode : debug')
elif options['embedded_mode']:
print('Operating Mode : embedded')
else:
print('Operating Mode : daemon')
if options['processes'] == 1:
print('Request Capacity : %s (%s process * %s threads)' % (
options['processes']*options['threads'],
options['processes'], options['threads']))
else:
print('Request Capacity : %s (%s processes * %s threads)' % (
options['processes']*options['threads'],
options['processes'], options['threads']))
if not options['debug_mode'] and not options['embedded_mode']:
print('Request Timeout : %s (seconds)' % options['request_timeout'])
if options['startup_timeout']:
print('Startup Timeout : %s (seconds)' % options['startup_timeout'])
print('Queue Backlog : %s (connections)' % options['daemon_backlog'])
print('Queue Timeout : %s (seconds)' % options['queue_timeout'])
print('Server Capacity : %s (event/worker), %s (prefork)' % (
options['worker_max_clients'], options['prefork_max_clients']))
print('Server Backlog : %s (connections)' % options['server_backlog'])
print('Locale Setting :', options['locale'])
sys.stdout.flush()
if not options['rewrite_rules']:
options['rewrite_rules'] = options['server_root'] + '/rewrite.conf'
if not os.path.isfile(options['rewrite_rules']):
with open(options['rewrite_rules'], 'w') as fp:
pass
generate_apache_config(options)
if os.name != 'nt':
generate_control_scripts(options)
return options
def cmd_start_server(params):
formatter = optparse.IndentedHelpFormatter()
formatter.set_long_opt_delimiter(' ')
usage = '%prog start-server script [options]'
parser = optparse.OptionParser(usage=usage, option_list=option_list,
formatter=formatter)
(options, args) = parser.parse_args(params)
config = _cmd_setup_server('start-server', args, vars(options))
if config['setup_only']:
return
if os.name == 'nt':
executable = config['httpd_executable']
environ = copy.deepcopy(os.environ)
environ['MOD_WSGI_MODULES_DIRECTORY'] = config['modules_directory']
httpd_arguments = list(config['httpd_arguments_list'])
httpd_arguments.extend(['-f', config['httpd_conf']])
httpd_arguments.extend(['-DONE_PROCESS'])
os.environ['MOD_WSGI_MODULES_DIRECTORY'] = config['modules_directory']
subprocess.call([executable]+httpd_arguments)
sys.exit(0)
else:
executable = posixpath.join(config['server_root'], 'apachectl')
if sys.stdout.isatty():
process = None
def handler(signum, frame):
if process is None:
sys.exit(1)
else:
if signum not in [signal.SIGWINCH]:
os.kill(process.pid, signum)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGWINCH, handler)
process = subprocess.Popen([executable, 'start', '-DFOREGROUND'],
preexec_fn=os.setpgrp)
process.wait()
else:
os.execl(executable, executable, 'start', '-DFOREGROUND')
def cmd_module_config(params):
formatter = optparse.IndentedHelpFormatter()
formatter.set_long_opt_delimiter(' ')
usage = '%prog module-config'
parser = optparse.OptionParser(usage=usage, formatter=formatter)
(options, args) = parser.parse_args(params)
if len(args) != 0:
parser.error('Incorrect number of arguments.')
if os.name == 'nt':
real_prefix = getattr(sys, 'real_prefix', None)
base_prefix = getattr(sys, 'base_prefix', None)
real_prefix = real_prefix or base_prefix or sys.prefix
library_version = sysconfig.get_config_var('VERSION')
library_name = 'python%s.dll' % library_version
library_path = posixpath.join(real_prefix, library_name)
if not os.path.exists(library_path):
library_name = 'python%s.dll' % library_version[0]
library_path = posixpath.join(real_prefix, 'DLLs', library_name)
if not os.path.exists(library_path):
library_path = None
if library_path:
library_path = posixpath.normpath(library_path)
library_path = library_path.replace('\\', '/')
print('LoadFile "%s"' % library_path)
module_path = where()
module_path = module_path.replace('\\', '/')
prefix = sys.prefix
prefix = posixpath.normpath(prefix)
prefix = prefix.replace('\\', '/')
print('LoadModule wsgi_module "%s"' % module_path)
print('WSGIPythonHome "%s"' % prefix)
else:
module_path = where()
prefix = sys.prefix
prefix = posixpath.normpath(prefix)
if _py_dylib:
print('LoadFile "%s"' % _py_dylib)
print('LoadModule wsgi_module "%s"' % module_path)
print('WSGIPythonHome "%s"' % prefix)
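# Illustrative output sketch for the 'module-config' command on a non-Windows
# host (paths are assumptions, not taken from this environment):
#
#   LoadModule wsgi_module "/usr/local/lib/python3.8/site-packages/mod_wsgi/server/mod_wsgi-py38.cpython-38-x86_64-linux-gnu.so"
#   WSGIPythonHome "/usr/local"
#
# The printed directives are intended to be pasted into the main Apache
# configuration file.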
def cmd_install_module(params):
formatter = optparse.IndentedHelpFormatter()
formatter.set_long_opt_delimiter(' ')
usage = '%prog install-module [options]'
parser = optparse.OptionParser(usage=usage, formatter=formatter)
parser.add_option('--modules-directory', metavar='DIRECTORY',
default=apxs_config.LIBEXECDIR)
(options, args) = parser.parse_args(params)
if len(args) != 0:
parser.error('Incorrect number of arguments.')
target = posixpath.abspath(posixpath.join(options.modules_directory,
posixpath.basename(MOD_WSGI_SO)))
shutil.copyfile(where(), target)
if _py_dylib:
print('LoadFile "%s"' % _py_dylib)
print('LoadModule wsgi_module "%s"' % target)
print('WSGIPythonHome "%s"' % posixpath.normpath(sys.prefix))
def cmd_module_location(params):
formatter = optparse.IndentedHelpFormatter()
formatter.set_long_opt_delimiter(' ')
usage = '%prog module-location'
parser = optparse.OptionParser(usage=usage, formatter=formatter)
(options, args) = parser.parse_args(params)
if len(args) != 0:
parser.error('Incorrect number of arguments.')
print(where())
if os.name == 'nt':
main_usage="""
%prog command [params]
Commands:
module-config
module-location
"""
else:
main_usage="""
%prog command [params]
Commands:
install-module
module-config
module-location
setup-server
start-server
"""
def main():
parser = optparse.OptionParser(main_usage.strip())
args = sys.argv[1:]
if not args:
parser.error('No command was specified.')
command = args.pop(0)
args = [os.path.expandvars(arg) for arg in args]
if os.name == 'nt':
if command == 'module-config':
cmd_module_config(args)
elif command == 'module-location':
cmd_module_location(args)
elif command == 'x-start-server':
cmd_start_server(args)
else:
parser.error('Invalid command was specified.')
else:
if command == 'install-module':
cmd_install_module(args)
elif command == 'module-config':
cmd_module_config(args)
elif command == 'module-location':
cmd_module_location(args)
elif command == 'setup-server':
cmd_setup_server(args)
elif command == 'start-server':
cmd_start_server(args)
else:
parser.error('Invalid command was specified.')
def start(*args):
cmd_start_server(list(args))
if __name__ == '__main__':
main()
|
speedtest_server.py
|
import socket
import time
import json
import threading
import sys
from queue import Queue
import struct
MAX_PACKET_LOSS = .15
def max_packet_loss(pl_dict):
max_p_loss = 0
for key in pl_dict:
if pl_dict[key]['packet_loss'] > max_p_loss:
max_p_loss = pl_dict[key]['packet_loss']
return max_p_loss
# Assume a socket disconnect (recv() returning an empty value) means all data
# has been sent.
def recv_basic(the_socket):
    total_data = []
    while True:
        data = the_socket.recv(8192)
        if not data:
            break
        total_data.append(data)
    # recv() returns bytes, so join as bytes before decoding.
    return b''.join(total_data).decode('utf-8')
def recv_timeout(the_socket, timeout=2):
    # Make the socket non-blocking and collect whatever arrives within the
    # timeout window.
    the_socket.setblocking(False)
    total_data = []
    begin = time.time()
    while True:
        # If some data has arrived, stop after the timeout; if nothing has
        # arrived at all, wait up to twice the timeout.
        if total_data and time.time() - begin > timeout:
            break
        elif time.time() - begin > timeout * 2:
            break
        try:
            data = the_socket.recv(8192)
            if data:
                total_data.append(data)
                # Reset the timer each time new data arrives.
                begin = time.time()
            else:
                # Sleep briefly to indicate a gap.
                time.sleep(0.1)
        except BlockingIOError:
            pass
    # Join all parts (bytes) to make the final string.
    return b''.join(total_data).decode('utf-8')
End = '\n'
def recv_end(the_socket):
    total_data = []
    while True:
        data = the_socket.recv(8192).decode('utf-8')
        if End in data:
            total_data.append(data[:data.find(End)])
            break
        total_data.append(data)
        if len(total_data) > 1:
            # Check whether the end-of-data marker was split across chunks.
            last_pair = total_data[-2] + total_data[-1]
            if End in last_pair:
                total_data[-2] = last_pair[:last_pair.find(End)]
                total_data.pop()
                break
    return ''.join(total_data)
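# Illustrative sender sketch for recv_end() (the connected socket object is an
# assumption): the peer must terminate each message with the End marker, e.g.
#
#   sock.sendall(('Get stats' + End).encode('utf-8'))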
def recv_size(the_socket):
    # The data length is packed into the first 4 bytes (big-endian).
    total_len = 0
    total_data = []
    size = sys.maxsize
    size_data = b''
    recv_size = 8192
    while total_len < size:
        sock_data = the_socket.recv(recv_size)
        if not total_data:
            # Accumulate until the 4-byte length header is complete.
            size_data += sock_data
            if len(size_data) > 4:
                size = struct.unpack('>i', size_data[:4])[0]
                recv_size = min(size, 524288)
                total_data.append(size_data[4:])
        else:
            total_data.append(sock_data)
        total_len = sum(len(i) for i in total_data)
    return b''.join(total_data).decode('utf-8')
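# Illustrative sender sketch for recv_size() (socket setup and the stats dict
# are assumptions): the peer prefixes the payload with its length packed into
# 4 big-endian bytes, matching the struct.unpack('>i', ...) call above, e.g.
#
#   payload = json.dumps(stats).encode('utf-8')
#   sock.sendall(struct.pack('>i', len(payload)) + payload)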
def control_listener(d):
server_address = ('', 8802)
print("Listening of port:", server_address)
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
            # Keep serving stats requests until packet loss exceeds the threshold
            while max_packet_loss(d) < MAX_PACKET_LOSS:
# Wait for a request for data
msg = recv_end(connection)
                if msg == 'Get stats':
                    # Send the stats as a newline-terminated length header
                    # followed by the JSON payload.
                    try:
                        data = json.dumps(d)
                    except Exception as e:
                        print(e)
                        continue
                    header_str = str(len(data)) + '\n'
                    connection.send(bytes(header_str, encoding='utf-8'))
                    connection.sendall(bytes(data, encoding='utf-8'))
if msg == "Done":
d.clear()
connection.close()
break
except Exception as e:
print(e)
print("Resetting the stats")
d.clear() # Clear the dictionary
finally:
# Clean up the connection
connection.close()
def data_listener(pl_dict):
hostname = socket.gethostname()
# ip = socket.gethostbyname(hostname)
ip = ''
port = 8801
s= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = (ip,port)
s.bind(server_address)
last_counter = dict()
packet_count = dict()
packets_missed = dict()
print("### Server is listening for ", server_address)
while True:
data, address = s.recvfrom(4096)
recv_ip = address[0]
port = address[1]
data_list = data.decode().split(" ")
test = data_list[0]
if test == 'Data':
counter = int(data_list[1])
if port not in last_counter:
last_counter[port] = counter
packet_count[port] = 1
packets_missed[port] = 0
elif counter == last_counter[port]+1:
packet_count[port] += 1
last_counter[port] = counter
            else:
                # The gap between the previous counter and this one, minus the
                # packet that just arrived, is the number of packets missed;
                # the arriving packet still counts as received.
                packets_missed[port] += counter - last_counter[port] - 1
                packet_count[port] += 1
                last_counter[port] = counter
pl_dict[port] = {
'packet_count': packet_count[port],
'packets_missed': packets_missed[port],
'recv_ip': recv_ip,
'recv_port': port,
'packet_loss': (packets_missed[port] / packet_count[port]),
'last_counter': last_counter[port]
}
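# Illustrative UDP sender sketch matching the 'Data <counter>' datagrams parsed
# above (the destination address is an assumption):
#
#   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   for counter in range(1000):
#       s.sendto('Data {}'.format(counter).encode('utf-8'), ('127.0.0.1', 8801))
#       time.sleep(0.01)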
def main(argv):
pdict = {}
#pdict[0] = {
# 'packet_count': 0,
# 'packets_missed': 0,
# 'recv_ip': 0,
# 'recv_port': 0,
# 'packet_loss': 0
#}
#control_listener(pl_dict=pdict)
t = threading.Thread(target=control_listener, args=(pdict,))
t.start()
data_listener(pdict)
t.join()
if __name__ == "__main__":
main(sys.argv[1:])
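# Illustrative control-client sketch for the TCP listener above (the host
# address is an assumption): send a newline-terminated 'Get stats' request,
# read the newline-terminated length header, read that many bytes of JSON,
# then send 'Done' so the server clears its stats and closes the connection.
#
#   import socket, json
#   with socket.create_connection(('127.0.0.1', 8802)) as c:
#       c.sendall(b'Get stats\n')
#       buf = b''
#       while b'\n' not in buf:
#           buf += c.recv(1)
#       header, _, rest = buf.partition(b'\n')
#       length = int(header.decode('utf-8'))
#       while len(rest) < length:
#           rest += c.recv(4096)
#       stats = json.loads(rest.decode('utf-8'))
#       c.sendall(b'Done\n')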
|
application.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
Provides TensorBoardWSGIApp for building a TensorBoard WSGI app.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import collections
import contextlib
import json
import os
import re
import shutil
import sqlite3
import tempfile
import threading
import time
import six
from six.moves.urllib import parse as urlparse # pylint: disable=wrong-import-order
from werkzeug import wrappers
from tensorboard import errors
from tensorboard.backend import empty_path_redirect
from tensorboard.backend import http_util
from tensorboard.backend import path_prefix
from tensorboard.backend.event_processing import db_import_multiplexer
from tensorboard.backend.event_processing import data_provider as event_data_provider # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_accumulator as event_accumulator # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.plugins import base_plugin
from tensorboard.plugins.audio import metadata as audio_metadata
from tensorboard.plugins.core import core_plugin
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.pr_curve import metadata as pr_curve_metadata
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.util import tb_logging
DEFAULT_SIZE_GUIDANCE = {
event_accumulator.TENSORS: 10,
}
# TODO(@wchargin): Once SQL mode is in play, replace this with an
# alternative that does not privilege first-party plugins.
DEFAULT_TENSOR_SIZE_GUIDANCE = {
scalar_metadata.PLUGIN_NAME: 1000,
image_metadata.PLUGIN_NAME: 10,
audio_metadata.PLUGIN_NAME: 10,
histogram_metadata.PLUGIN_NAME: 500,
pr_curve_metadata.PLUGIN_NAME: 100,
}
DATA_PREFIX = '/data'
PLUGIN_PREFIX = '/plugin'
PLUGINS_LISTING_ROUTE = '/plugins_listing'
# Slashes in a plugin name could throw the router for a loop. An empty
# name would be confusing, too. To be safe, let's restrict the valid
# names as follows.
_VALID_PLUGIN_RE = re.compile(r'^[A-Za-z0-9_.-]+$')
logger = tb_logging.get_logger()
def tensor_size_guidance_from_flags(flags):
"""Apply user per-summary size guidance overrides."""
tensor_size_guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)
if not flags or not flags.samples_per_plugin:
return tensor_size_guidance
for token in flags.samples_per_plugin.split(','):
k, v = token.strip().split('=')
tensor_size_guidance[k] = int(v)
return tensor_size_guidance
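# Illustrative sketch (the flag value is an assumption): with
# --samples_per_plugin 'scalars=500,images=100', the loop above overrides the
# defaults so that tensor_size_guidance maps 'scalars' to 500 and 'images' to
# 100 while the other plugin entries keep their default values.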
def standard_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider):
"""Construct a TensorBoardWSGIApp with standard plugins and multiplexer.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
plugin_loaders: A list of TBLoader instances.
assets_zip_provider: See TBContext documentation for more information.
Returns:
The new TensorBoard WSGI application.
:type plugin_loaders: list[base_plugin.TBLoader]
:rtype: TensorBoardWSGI
"""
data_provider = None
multiplexer = None
reload_interval = flags.reload_interval
if flags.db_import:
# DB import mode.
db_uri = flags.db
# Create a temporary DB file if we weren't given one.
if not db_uri:
tmpdir = tempfile.mkdtemp(prefix='tbimport')
atexit.register(shutil.rmtree, tmpdir)
db_uri = 'sqlite:%s/tmp.sqlite' % tmpdir
db_connection_provider = create_sqlite_connection_provider(db_uri)
logger.info('Importing logdir into DB at %s', db_uri)
multiplexer = db_import_multiplexer.DbImportMultiplexer(
db_uri=db_uri,
db_connection_provider=db_connection_provider,
purge_orphaned_data=flags.purge_orphaned_data,
max_reload_threads=flags.max_reload_threads)
elif flags.db:
# DB read-only mode, never load event logs.
reload_interval = -1
db_connection_provider = create_sqlite_connection_provider(flags.db)
multiplexer = _DbModeMultiplexer(flags.db, db_connection_provider)
else:
# Regular logdir loading mode.
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=DEFAULT_SIZE_GUIDANCE,
tensor_size_guidance=tensor_size_guidance_from_flags(flags),
purge_orphaned_data=flags.purge_orphaned_data,
max_reload_threads=flags.max_reload_threads,
event_file_active_filter=_get_event_file_active_filter(flags))
if flags.generic_data != 'false':
data_provider = event_data_provider.MultiplexerDataProvider(
multiplexer, flags.logdir or flags.logdir_spec
)
if reload_interval >= 0:
# We either reload the multiplexer once when TensorBoard starts up, or we
# continuously reload the multiplexer.
if flags.logdir:
path_to_run = {os.path.expanduser(flags.logdir): None}
else:
path_to_run = parse_event_files_spec(flags.logdir_spec)
start_reloading_multiplexer(
multiplexer, path_to_run, reload_interval, flags.reload_task)
return TensorBoardWSGIApp(
flags, plugin_loaders, data_provider, assets_zip_provider, multiplexer)
def _handling_errors(wsgi_app):
def wrapper(*args):
(environ, start_response) = (args[-2], args[-1])
try:
return wsgi_app(*args)
except errors.PublicError as e:
request = wrappers.Request(environ)
error_app = http_util.Respond(
request, str(e), "text/plain", code=e.http_code
)
return error_app(environ, start_response)
# Let other exceptions be handled by the server, as an opaque
# internal server error.
return wrapper
def TensorBoardWSGIApp(
flags,
plugins,
data_provider=None,
assets_zip_provider=None,
deprecated_multiplexer=None):
"""Constructs a TensorBoard WSGI app from plugins and data providers.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
plugins: A list of plugins, which can be provided as TBPlugin subclasses
or TBLoader instances or subclasses.
assets_zip_provider: See TBContext documentation for more information.
data_provider: Instance of `tensorboard.data.provider.DataProvider`. May
be `None` if `flags.generic_data` is set to `"false"` in which case
`deprecated_multiplexer` must be passed instead.
deprecated_multiplexer: Optional `plugin_event_multiplexer.EventMultiplexer`
to use for any plugins not yet enabled for the DataProvider API.
Required if the data_provider argument is not passed.
Returns:
A WSGI application that implements the TensorBoard backend.
"""
db_uri = None
db_connection_provider = None
if isinstance(
deprecated_multiplexer,
(db_import_multiplexer.DbImportMultiplexer, _DbModeMultiplexer)):
db_uri = deprecated_multiplexer.db_uri
db_connection_provider = deprecated_multiplexer.db_connection_provider
plugin_name_to_instance = {}
context = base_plugin.TBContext(
data_provider=data_provider,
db_connection_provider=db_connection_provider,
db_uri=db_uri,
flags=flags,
logdir=flags.logdir,
multiplexer=deprecated_multiplexer,
assets_zip_provider=assets_zip_provider,
plugin_name_to_instance=plugin_name_to_instance,
window_title=flags.window_title)
tbplugins = []
for plugin_spec in plugins:
loader = make_plugin_loader(plugin_spec)
plugin = loader.load(context)
if plugin is None:
continue
tbplugins.append(plugin)
plugin_name_to_instance[plugin.plugin_name] = plugin
return TensorBoardWSGI(tbplugins, flags.path_prefix)
class TensorBoardWSGI(object):
"""The TensorBoard WSGI app that delegates to a set of TBPlugin."""
def __init__(self, plugins, path_prefix=''):
"""Constructs TensorBoardWSGI instance.
Args:
plugins: A list of base_plugin.TBPlugin subclass instances.
      path_prefix: A prefix of the URL path under which TensorBoard is served,
        or the empty string if it is served from the root.
Returns:
A WSGI application for the set of all TBPlugin instances.
Raises:
ValueError: If some plugin has no plugin_name
ValueError: If some plugin has an invalid plugin_name (plugin
names must only contain [A-Za-z0-9_.-])
ValueError: If two plugins have the same plugin_name
ValueError: If some plugin handles a route that does not start
with a slash
:type plugins: list[base_plugin.TBPlugin]
"""
self._plugins = plugins
if path_prefix.endswith('/'):
self._path_prefix = path_prefix[:-1]
else:
self._path_prefix = path_prefix
self.exact_routes = {
# TODO(@chihuahua): Delete this RPC once we have skylark rules that
# obviate the need for the frontend to determine which plugins are
# active.
DATA_PREFIX + PLUGINS_LISTING_ROUTE: self._serve_plugins_listing,
}
unordered_prefix_routes = {}
# Serve the routes from the registered plugins using their name as the route
# prefix. For example if plugin z has two routes /a and /b, they will be
# served as /data/plugin/z/a and /data/plugin/z/b.
plugin_names_encountered = set()
for plugin in self._plugins:
if plugin.plugin_name is None:
raise ValueError('Plugin %s has no plugin_name' % plugin)
if not _VALID_PLUGIN_RE.match(plugin.plugin_name):
raise ValueError('Plugin %s has invalid name %r' % (plugin,
plugin.plugin_name))
if plugin.plugin_name in plugin_names_encountered:
raise ValueError('Duplicate plugins for name %s' % plugin.plugin_name)
plugin_names_encountered.add(plugin.plugin_name)
try:
plugin_apps = plugin.get_plugin_apps()
except Exception as e: # pylint: disable=broad-except
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
raise
logger.warn('Plugin %s failed. Exception: %s',
plugin.plugin_name, str(e))
continue
for route, app in plugin_apps.items():
if not route.startswith('/'):
raise ValueError('Plugin named %r handles invalid route %r: '
'route does not start with a slash' %
(plugin.plugin_name, route))
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
path = route
else:
path = (
DATA_PREFIX + PLUGIN_PREFIX + '/' + plugin.plugin_name + route
)
if path.endswith('/*'):
# Note we remove the '*' but leave the slash in place.
path = path[:-1]
if '*' in path:
# note we re-add the removed * in the format string
raise ValueError('Plugin %r handles invalid route \'%s*\': Only '
'trailing wildcards are supported '
'(i.e., `/.../*`)' %
(plugin.plugin_name, path))
unordered_prefix_routes[path] = app
else:
if '*' in path:
raise ValueError('Plugin %r handles invalid route %r: Only '
'trailing wildcards are supported '
'(i.e., `/.../*`)' %
(plugin.plugin_name, path))
self.exact_routes[path] = app
# Wildcard routes will be checked in the given order, so we sort them
# longest to shortest so that a more specific route will take precedence
# over a more general one (e.g., a catchall route `/*` should come last).
self.prefix_routes = collections.OrderedDict(
sorted(
six.iteritems(unordered_prefix_routes),
key=lambda x: len(x[0]),
reverse=True))
self._app = self._create_wsgi_app()
def _create_wsgi_app(self):
"""Apply middleware to create the final WSGI app."""
app = self._route_request
app = empty_path_redirect.EmptyPathRedirectMiddleware(app)
app = path_prefix.PathPrefixMiddleware(app, self._path_prefix)
app = _handling_errors(app)
return app
@wrappers.Request.application
def _serve_plugins_listing(self, request):
"""Serves an object mapping plugin name to whether it is enabled.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
"""
response = collections.OrderedDict()
for plugin in self._plugins:
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
# This plugin's existence is a backend implementation detail.
continue
start = time.time()
is_active = plugin.is_active()
elapsed = time.time() - start
logger.info(
'Plugin listing: is_active() for %s took %0.3f seconds',
plugin.plugin_name, elapsed)
plugin_metadata = plugin.frontend_metadata()
output_metadata = {
'disable_reload': plugin_metadata.disable_reload,
'enabled': is_active,
# loading_mechanism set below
'remove_dom': plugin_metadata.remove_dom,
# tab_name set below
}
if plugin_metadata.tab_name is not None:
output_metadata['tab_name'] = plugin_metadata.tab_name
else:
output_metadata['tab_name'] = plugin.plugin_name
es_module_handler = plugin_metadata.es_module_path
element_name = plugin_metadata.element_name
if element_name is not None and es_module_handler is not None:
logger.error(
'Plugin %r declared as both legacy and iframed; skipping',
plugin.plugin_name,
)
continue
elif element_name is not None and es_module_handler is None:
loading_mechanism = {
'type': 'CUSTOM_ELEMENT',
'element_name': element_name,
}
elif element_name is None and es_module_handler is not None:
loading_mechanism = {
'type': 'IFRAME',
'module_path': ''.join([
request.script_root, DATA_PREFIX, PLUGIN_PREFIX, '/',
plugin.plugin_name, es_module_handler,
]),
}
else:
# As a compatibility measure (for plugins that we don't
# control), we'll pull it from the frontend registry for now.
loading_mechanism = {
'type': 'NONE',
}
output_metadata['loading_mechanism'] = loading_mechanism
response[plugin.plugin_name] = output_metadata
return http_util.Respond(request, response, 'application/json')
def __call__(self, environ, start_response):
"""Central entry point for the TensorBoard application.
This __call__ method conforms to the WSGI spec, so that instances of this
class are WSGI applications.
Args:
environ: See WSGI spec (PEP 3333).
start_response: See WSGI spec (PEP 3333).
"""
return self._app(environ, start_response)
def _route_request(self, environ, start_response):
"""Delegate an incoming request to sub-applications.
This method supports strict string matching and wildcard routes of a
single path component, such as `/foo/*`. Other routing patterns,
like regular expressions, are not supported.
This is the main TensorBoard entry point before middleware is
applied. (See `_create_wsgi_app`.)
Args:
environ: See WSGI spec (PEP 3333).
start_response: See WSGI spec (PEP 3333).
"""
request = wrappers.Request(environ)
parsed_url = urlparse.urlparse(request.path)
clean_path = _clean_path(parsed_url.path)
# pylint: disable=too-many-function-args
if clean_path in self.exact_routes:
return self.exact_routes[clean_path](environ, start_response)
else:
for path_prefix in self.prefix_routes:
if clean_path.startswith(path_prefix):
return self.prefix_routes[path_prefix](environ, start_response)
logger.warn('path %s not found, sending 404', clean_path)
return http_util.Respond(request, 'Not found', 'text/plain', code=404)(
environ, start_response)
# pylint: enable=too-many-function-args
def parse_event_files_spec(logdir_spec):
"""Parses `logdir_spec` into a map from paths to run group names.
The `--logdir_spec` flag format is a comma-separated list of path
specifications. A path spec looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec
with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
    logdir_spec: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir_spec is None:
return files
  # Keep this consistent with ParseURI in core/lib/io/path.cc
uri_pattern = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
for specification in logdir_spec.split(','):
    # Check whether the spec contains a group. A spec starting with xyz:// is
    # regarded as a URI path spec instead of a group spec. If the spec looks
    # like /foo:bar/baz, then we assume it is a path with a colon. If the spec
    # looks like [a-zA-Z]:\foo then we assume it is a Windows path and not a
    # single-letter group.
if (uri_pattern.match(specification) is None and ':' in specification and
specification[0] != '/' and not os.path.splitdrive(specification)[0]):
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if uri_pattern.match(path) is None:
path = os.path.realpath(os.path.expanduser(path))
files[path] = run_name
return files
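# Illustrative sketch (paths and names are assumptions):
# parse_event_files_spec('train:/tmp/logs/train,/tmp/logs/eval') returns
# roughly {'/tmp/logs/train': 'train', '/tmp/logs/eval': None}, with each
# non-URI path passed through os.path.expanduser() and os.path.realpath(),
# and None later interpreted as "name the run after its path".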
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval,
reload_task):
"""Starts automatically reloading the given multiplexer.
If `load_interval` is positive, the thread will reload the multiplexer
by calling `ReloadMultiplexer` every `load_interval` seconds, starting
immediately. Otherwise, reloads the multiplexer once and never again.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: An integer greater than or equal to 0. If positive, how many
seconds to wait after one load before starting the next load. Otherwise,
reloads the multiplexer once and never again (no continuous reloading).
reload_task: Indicates the type of background task to reload with.
Raises:
ValueError: If `load_interval` is negative.
"""
if load_interval < 0:
raise ValueError('load_interval is negative: %d' % load_interval)
def _reload():
while True:
start = time.time()
logger.info('TensorBoard reload process beginning')
for path, name in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
logger.info('TensorBoard reload process: Reload the whole Multiplexer')
multiplexer.Reload()
duration = time.time() - start
logger.info('TensorBoard done reloading. Load took %0.3f secs', duration)
if load_interval == 0:
# Only load the multiplexer once. Do not continuously reload.
break
time.sleep(load_interval)
if reload_task == 'process':
logger.info('Launching reload in a child process')
import multiprocessing
process = multiprocessing.Process(target=_reload, name='Reloader')
# Best-effort cleanup; on exit, the main TB parent process will attempt to
# kill all its daemonic children.
process.daemon = True
process.start()
elif reload_task in ('thread', 'auto'):
logger.info('Launching reload in a daemon thread')
thread = threading.Thread(target=_reload, name='Reloader')
# Make this a daemon thread, which won't block TB from exiting.
thread.daemon = True
thread.start()
elif reload_task == 'blocking':
if load_interval != 0:
raise ValueError('blocking reload only allowed with load_interval=0')
_reload()
else:
raise ValueError('unrecognized reload_task: %s' % reload_task)
def create_sqlite_connection_provider(db_uri):
"""Returns function that returns SQLite Connection objects.
Args:
db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db".
Returns:
A function that returns a new PEP-249 DB Connection, which must be closed,
each time it is called.
Raises:
ValueError: If db_uri is not a valid sqlite file URI.
"""
uri = urlparse.urlparse(db_uri)
if uri.scheme != 'sqlite':
raise ValueError('Only sqlite DB URIs are supported: ' + db_uri)
if uri.netloc:
raise ValueError('Can not connect to SQLite over network: ' + db_uri)
if uri.path == ':memory:':
raise ValueError('Memory mode SQLite not supported: ' + db_uri)
path = os.path.expanduser(uri.path)
params = _get_connect_params(uri.query)
# TODO(@jart): Add thread-local pooling.
return lambda: sqlite3.connect(path, **params)
def _get_connect_params(query):
params = urlparse.parse_qs(query)
  if any(len(v) > 1 for v in params.values()):
    raise ValueError('DB URI params list has duplicate keys: ' + query)
return {k: json.loads(v[0]) for k, v in params.items()}
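# Illustrative sketch (the URI is an assumption):
# create_sqlite_connection_provider('sqlite:~/tb.db?timeout=30') parses the
# query string via _get_connect_params into {'timeout': 30} and returns a
# zero-argument callable that opens a fresh sqlite3 connection to the
# expanded path each time it is called.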
def _clean_path(path):
"""Removes a trailing slash from a non-root path.
Arguments:
path: The path of a request.
Returns:
The route to use to serve the request.
"""
if path != '/' and path.endswith('/'):
return path[:-1]
return path
def _get_event_file_active_filter(flags):
"""Returns a predicate for whether an event file load timestamp is active.
Returns:
A predicate function accepting a single UNIX timestamp float argument, or
None if multi-file loading is not enabled.
"""
if not flags.reload_multifile:
return None
inactive_secs = flags.reload_multifile_inactive_secs
if inactive_secs == 0:
return None
if inactive_secs < 0:
return lambda timestamp: True
return lambda timestamp: timestamp + inactive_secs >= time.time()
class _DbModeMultiplexer(event_multiplexer.EventMultiplexer):
"""Shim EventMultiplexer to use when in read-only DB mode.
In read-only DB mode, the EventMultiplexer is nonfunctional - there is no
logdir to reload, and the data is all exposed via SQL. This class represents
the do-nothing EventMultiplexer for that purpose, which serves only as a
conduit for DB-related parameters.
The load APIs raise exceptions if called, and the read APIs always
return empty results.
"""
def __init__(self, db_uri, db_connection_provider):
"""Constructor for `_DbModeMultiplexer`.
Args:
db_uri: A URI to the database file in use.
db_connection_provider: Provider function for creating a DB connection.
"""
logger.info('_DbModeMultiplexer initializing for %s', db_uri)
super(_DbModeMultiplexer, self).__init__()
self.db_uri = db_uri
self.db_connection_provider = db_connection_provider
logger.info('_DbModeMultiplexer done initializing')
def AddRun(self, path, name=None):
"""Unsupported."""
raise NotImplementedError()
def AddRunsFromDirectory(self, path, name=None):
"""Unsupported."""
raise NotImplementedError()
def Reload(self):
"""Unsupported."""
raise NotImplementedError()
def make_plugin_loader(plugin_spec):
"""Returns a plugin loader for the given plugin.
Args:
plugin_spec: A TBPlugin subclass, or a TBLoader instance or subclass.
Returns:
A TBLoader for the given plugin.
"""
if isinstance(plugin_spec, base_plugin.TBLoader):
return plugin_spec
if isinstance(plugin_spec, type):
if issubclass(plugin_spec, base_plugin.TBLoader):
return plugin_spec()
if issubclass(plugin_spec, base_plugin.TBPlugin):
return base_plugin.BasicLoader(plugin_spec)
raise TypeError("Not a TBLoader or TBPlugin subclass: %r" % (plugin_spec,))
|
message_mass_send.py
|
from work_materials.globals import dispatcher, psql_creditals
import logging, traceback, time, threading, psycopg2
def mass_send(text):
conn = psycopg2.connect("dbname={0} user={1} password={2}".format(psql_creditals['dbname'], psql_creditals['user'],
psql_creditals['pass']))
conn.set_session(autocommit=True)
cursor = conn.cursor()
text = "Время искать свои половинки!\n/shipper"
request = "select telegram_id from players limit 2"
cursor.execute(request)
row = cursor.fetchone()
i = 0
while row:
try:
dispatcher.bot.send_message(chat_id=row[0], text = text)
except Exception:
logging.warning("Error in sending message, telegram_id ={}\n{}".format(row[0], traceback.format_exc().splitlines()[-1]))
else:
i += 1
print("sent {} message, chat_id = {}".format(i, row[0]))
time.sleep(0.1)
row = cursor.fetchone()
cursor.close()
def mass_send_start(bot, update):
threading.Thread(target=mass_send, args = ("",)).start()
    bot.send_message(chat_id=update.message.chat_id, text="Начата рассылка...")  # "Mass send started..."
|
nrf802154_sniffer.py
|
#!/usr/bin/env python
# Copyright 2018, Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os
is_standalone = __name__ == '__main__'
if sys.version[0] == '2':
# py2 support
import Queue as Queue
else:
import queue as Queue
if __name__ == '__main__':
sys.path.insert(0, os.getcwd())
import re
import select
import signal
import struct
import threading
import time
import logging
from argparse import ArgumentParser
from binascii import a2b_hex
from serial import Serial, serialutil
from serial.tools.list_ports import comports
class Nrf802154Sniffer(object):
# Various options for pcap files: http://www.tcpdump.org/linktypes.html
#DLT='user'
DLT='802.15.4'
DLT_NO = 147 if DLT == 'user' else 230
# helper for wireshark arg parsing
CTRL_ARG_CHANNEL = 0
# pattern for packets being printed over serial
    RCV_REGEX = r'received:\s+([0-9a-fA-F]+)\s+power:\s+(-?\d+)\s+lqi:\s+(\d+)\s+time:\s+(-?\d+)'
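    # Example of a serial line this pattern matches (values are made up):
    #   received: 0102deadbeef power: -40 lqi: 120 time: 123456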
def __init__(self):
self.serial_queue = Queue.Queue()
self.running = threading.Event()
self.setup_done = threading.Event()
self.setup_done.clear()
self.logger = logging.getLogger(__name__)
self.dev = None
self.channel = None
def stop_sig_handler(self, *args, **kwargs):
"""
Function responsible for stopping the sniffer firmware and closing all threads.
"""
        # Let's wait with closing until we're sure that the sniffer started. Protects us
        # from very short tests (NOTE: the serial_reader has a delayed start).
while self.running.is_set() and not self.setup_done.is_set():
time.sleep(1)
if self.running.is_set():
self.serial_queue.put(b'')
self.serial_queue.put(b'sleep')
self.running.clear()
else:
self.logger.warning("Asked to stop {} while it was already stopped".format(self))
@staticmethod
def extcap_interfaces():
"""
Wireshark-related method that returns configuration options
:return: string with wireshark-compatible information
"""
# TODO: Detect connected sniffers and print one interface per each sniffer
res = []
res.append("extcap {version=1.0}{help=https://github.com/NordicSemiconductor/nRF-IEEE-802.15.4-radio-driver}{display=nRF 802.15.4 sniffer}")
res.append("interface {value=nrf802154}{display=nRF 802.15.4 sniffer}")
res.append("control {number=%d}{type=selector}{display=Channel}{tooltip=IEEE 802.15.4 channel}" % Nrf802154Sniffer.CTRL_ARG_CHANNEL)
for i in range(11, 27):
res.append("value {control=%d}{value=%d}{display=%d}" % (Nrf802154Sniffer.CTRL_ARG_CHANNEL, i, i))
return "\n".join(res)
@staticmethod
def extcap_dlts():
"""
Wireshark-related method that returns configuration options
:return: string with wireshark-compatible information
"""
return "dlt {number=%d}{name=IEEE802_15_4_NOFCS}{display=IEEE 802.15.4 without FCS}" % Nrf802154Sniffer.DLT_NO
@staticmethod
def extcap_config(option):
"""
Wireshark-related method that returns configuration options
:return: string with wireshark-compatible information
"""
def list_comports():
result = []
for port in comports():
result.append ( (1, port[0], port[0], "false") )
return result
args = []
values = []
res =[]
args.append ( (0, '--channel', 'Channel', 'IEEE 802.15.4 channel', 'selector', '{required=true}{default=11}') )
# TODO: Instead of 'dev', 'interface' should define connected sniffer.
args.append ( (1, '--dev', 'Device', 'Serial device connected to the sniffer', 'selector', '{required=true}{reload=true}{placeholder=Loading serial devices ...}'))
if option == "dev":
values = list_comports()
if len(option) <= 0:
for arg in args:
res.append("arg {number=%d}{call=%s}{display=%s}{tooltip=%s}{type=%s}%s" % arg)
values = values + [ (0, "%d" % i, "%d" % i, "true" if i == 11 else "false" ) for i in range(11,27) ]
values = values + list_comports()
for value in values:
res.append("value {arg=%d}{value=%s}{display=%s}{default=%s}" % value)
return "\n".join(res)
def pcap_header(self):
"""
Returns pcap header to be written into pcap file.
"""
header = bytearray()
header += struct.pack('<L', int ('a1b2c3d4', 16 ))
header += struct.pack('<H', 2 ) # Pcap Major Version
header += struct.pack('<H', 4 ) # Pcap Minor Version
header += struct.pack('<I', int(0)) # Timezone
        header += struct.pack('<I', int(0)) # Accuracy of timestamps
header += struct.pack('<L', int ('000000ff', 16 )) # Max Length of capture frame
header += struct.pack('<L', self.DLT_NO) # DLT
return header
@staticmethod
def pcap_packet(frame, channel, rssi, lqi, timestamp):
"""
        Creates pcap packet to be saved in pcap file.
"""
pcap = bytearray()
caplength = len(frame)
if Nrf802154Sniffer.DLT == 'user':
caplength += 6
pcap += struct.pack('<L', timestamp // 1000000 ) # timestamp seconds
        pcap += struct.pack('<L', timestamp % 1000000 ) # timestamp microseconds
pcap += struct.pack('<L', caplength ) # length captured
pcap += struct.pack('<L', caplength ) # length in frame
if Nrf802154Sniffer.DLT == 'user':
pcap += struct.pack('<H', channel)
pcap += struct.pack('<h', rssi)
pcap += struct.pack('<H', lqi)
pcap += frame
return pcap
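    # Illustrative sketch (not part of the sniffer): writing a one-packet pcap
    # file with the two helpers above. The frame bytes and path are made up.
    @staticmethod
    def _example_write_pcap(path='example.pcap'):
        sniffer = Nrf802154Sniffer()
        frame = a2b_hex('0102deadbeef')
        with open(path, 'wb') as fh:
            fh.write(sniffer.pcap_header())
            fh.write(Nrf802154Sniffer.pcap_packet(
                frame, channel=11, rssi=-40, lqi=120, timestamp=123456))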
@staticmethod
def control_read(fn):
"""
Method used for reading wireshark command.
"""
try:
header = fn.read(6)
sp, _, length, arg, typ = struct.unpack('>sBHBB', header)
if length > 2:
payload = fn.read(length - 2)
else:
payload = ''
return arg, typ, payload
except:
return None, None, None
@staticmethod
def control_reader(fifo):
"""
Thread responsible for reading wireshark commands (read from fifo).
Related to not-yet-implemented wireshark toolbar features.
"""
with open(fifo, 'rb', 0 ) as fn:
arg = 0
            while arg is not None:
arg, typ, payload = Nrf802154Sniffer.control_read(fn)
def control_writer(self, fifo, queue):
"""
Thread responsible for sending wireshark commands (read from fifo).
Related to not-yet-implemented wireshark toolbar features.
"""
with open(fifo, 'wb', 0 ) as fn:
while self.running.is_set():
time.sleep(1)
def serial_write(self, ser):
"""
Function responsible for sending commands to serial port
"""
command = self.serial_queue.get(block=True, timeout=1)
try:
ser.write(command + b'\r\n')
ser.write(b'\r\n')
except IOError:
self.logger.error("Cannot write to {}".format(self))
self.running.clear()
def serial_writer(self, ser):
"""
Thread responsible for sending commands to serial port
"""
while self.running.is_set():
try:
self.serial_write(ser)
except Queue.Empty:
pass
# Write final commands and break out
while True:
try:
self.serial_write(ser)
except Queue.Empty:
break
def serial_reader(self, dev, channel, queue):
"""
Thread responsible for reading from serial port, parsing the output and storing parsed packets into queue.
"""
# Wireshark needs this sleep for reset purposes
time.sleep(2)
try:
with Serial(dev, 115200, timeout=1) as ser:
ser.reset_input_buffer()
ser.reset_output_buffer()
writer_thread = threading.Thread(target=self.serial_writer, args=(ser,), name="writer_thread")
writer_thread.start()
buf = b''
#TODO: Disable auto ack
init_cmd = []
init_cmd.append(b'')
init_cmd.append(b'promiscuous on')
init_cmd.append(b'channel ' + bytes(str(channel).encode()))
for cmd in init_cmd:
self.serial_queue.put(cmd)
# serial_write appends twice '\r\n' to each command, so we have to calculate that for the echo
init_res = ser.read(len(b"".join(c + b"\r\n\r\n" for c in init_cmd)))
if not all(cmd.decode() in init_res.decode() for cmd in init_cmd):
msg = "{} did not reply properly to setup commands. Is it flashed properly? " \
"Recieved: {}\n".format(self, init_res)
self.logger.error(msg)
self.serial_queue.put(b'receive')
self.setup_done.set()
while self.running.is_set():
ch = ser.read()
if ch != b'\n':
buf += ch
else:
m = re.search(self.RCV_REGEX, str(buf))
if m:
packet = a2b_hex(m.group(1)[:-4])
rssi = int(m.group(2))
lqi = int(m.group(3))
timestamp = int(m.group(4)) & 0xffffffff
channel = int(channel)
queue.put(self.pcap_packet(packet, channel, rssi, lqi, timestamp))
buf = b''
writer_thread.join()
# Let's clear serial link buffer after writer_thread is finished.
while ser.read():
pass
except (serialutil.SerialException, serialutil.SerialTimeoutException):
raise RuntimeError("Cannot communicate with '{}' serial device: {}".format(self, dev))
finally:
self.setup_done.set() # in case it wasn't set before
if self.running.is_set(): # another precaution
self.stop_sig_handler()
def fifo_writer(self, fifo, queue):
"""
Thread responsible for writing packets into pcap file/fifo from queue.
"""
with open(fifo, 'wb', 0 ) as fh:
fh.write(self.pcap_header())
fh.flush()
while self.running.is_set():
try:
packet = queue.get(block=True, timeout=1)
try:
if is_standalone:
sys.stdout.write('.')
sys.stdout.flush()
fh.write(packet)
fh.flush()
except IOError:
pass
except Queue.Empty:
pass
def fifo_detector(self, fifo):
"""
Thread responsible for closing the sniffer in case of closed file/fifo.
"""
with open(fifo, 'wb', 0) as fd:
p = select.poll()
p.register(fd, select.POLLHUP | select.POLLERR)
while self.running.is_set():
for descriptor, mask in p.poll(1000):
if descriptor == fd.fileno() and mask & (select.POLLHUP | select.POLLERR):
self.stop_sig_handler()
def extcap_capture(self, fifo, dev, channel, control_in=None, control_out=None):
"""
Main method responsible for starting all other threads. In case of standalone execution this method will block
until SIGTERM/SIGINT and/or stop_sig_handler disables the loop via self.running event.
"""
packet_queue = Queue.Queue()
self.channel = channel
self.dev = dev
self.running.set()
# TODO: Add toolbar with channel selector (channel per interface?)
if control_in:
ctr_in_thread = threading.Thread(target=self.control_reader, args=(control_in,))
ctr_in_thread.start()
serial_thread = threading.Thread(target=self.serial_reader, args=(self.dev, self.channel, packet_queue), name="serial_thread")
fifo_thread = threading.Thread(target=self.fifo_writer, args=(fifo, packet_queue), name="fifo_thread")
hup_thread = threading.Thread(target=self.fifo_detector, args=(fifo,), name="hup_thread")
hup_thread.start()
serial_thread.start()
fifo_thread.start()
while is_standalone and self.running.is_set():
time.sleep(1)
@staticmethod
def parse_args():
"""
Helper methods to make the standalone script work in console and wireshark
"""
parser = ArgumentParser(description="Extcap program for the nRF 802.15.4 sniffer")
parser.add_argument("--extcap-interfaces", help="Provide a list of interfaces to capture from", action="store_true")
parser.add_argument("--extcap-interface", help="Provide the interface to capture from")
parser.add_argument("--extcap-dlts", help="Provide a list of dlts for the given interface", action="store_true")
parser.add_argument("--extcap-config", help="Provide a list of configurations for the given interface", action="store_true")
parser.add_argument("--extcap-reload-option", help="Reload elements for the given option")
parser.add_argument("--capture", help="Start the capture routine", action="store_true" )
parser.add_argument("--fifo", help="Use together with capture to provide the fifo to dump data to")
parser.add_argument("--extcap-capture-filter", help="Used together with capture to provide a capture filter")
parser.add_argument("--extcap-control-in", help="Used to get control messages from toolbar")
parser.add_argument("--extcap-control-out", help="Used to send control messages to toolbar")
parser.add_argument("--channel", help="IEEE 802.15.4 capture channel [11-26]")
parser.add_argument("--dev", help="Serial device connected to the sniffer")
result = parser.parse_args()
if result.capture and not result.dev:
parser.error("--dev is required if --capture is present")
return result
def __str__(self):
return "{} ({}) channel {}".format(type(self).__name__, self.dev, self.channel)
def __repr__(self):
return self.__str__()
if is_standalone:
args = Nrf802154Sniffer.parse_args()
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO)
sniffer_comm = Nrf802154Sniffer()
if args.extcap_interfaces:
print(sniffer_comm.extcap_interfaces())
if args.extcap_dlts:
print(sniffer_comm.extcap_dlts())
if args.extcap_config:
if args.extcap_reload_option and len(args.extcap_reload_option) > 0:
option = args.extcap_reload_option
else:
option = ''
print(sniffer_comm.extcap_config(option))
if args.capture and args.fifo:
channel = args.channel if args.channel else 11
signal.signal(signal.SIGINT, sniffer_comm.stop_sig_handler)
signal.signal(signal.SIGTERM, sniffer_comm.stop_sig_handler)
try:
sniffer_comm.extcap_capture(args.fifo, args.dev, channel, args.extcap_control_in, args.extcap_control_out)
except KeyboardInterrupt as e:
sniffer_comm.stop_sig_handler()
|
standings.py
|
import time
import threading
import os
import sys
import itertools
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
done = False
def wait():
for c in itertools.cycle(['+', '-', '*', '/']):
if done:
break
sys.stdout.write('\rloading ' + c)
sys.stdout.flush()
time.sleep(0.2)
t = threading.Thread(target=wait)
t.start()
chrome_options = Options()
chrome_options.add_argument("--headless")
browser = webdriver.Chrome('./chromedriver',options=chrome_options)
browser.get('http://www.google.com/search?q=premier+league')
elem = browser.find_element_by_xpath('//*[@id="sports-app"]/div/div[2]/div/div/div/ol/li[3]')
total_height = elem.size["height"]+1000
browser.set_window_size(800, total_height)
elem.click()
time.sleep(1)
browser.save_screenshot('standing.png')
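# Open the screenshot and drive Preview into full screen via AppleScript (macOS-specific).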
os.system("open standing.png ; /usr/bin/osascript -e 'tell application \"Preview\"' -e \"activate\" -e 'tell application \"System Events\"' -e 'keystroke \"f\" using {control down, command down}' -e \"end tell\" -e \"end tell\"")
browser.quit()
done = True
|
dialogs.py
|
# -*- coding: utf-8 -*-
from threading import Thread, RLock
import xbmc
import xbmcaddon
import xbmcgui
def select_ext(title, populator, tasks_count, sort_function = None):
addonPath = xbmcaddon.Addon().getAddonInfo('path').decode('utf-8')
dlg = SelectorDialog("DialogSelect.xml", addonPath, title=title,
populator=populator, steps=tasks_count, sort_function=sort_function)
with ExtendedDialogHacks():
dlg.doModal()
selection = dlg.get_selection()
items = dlg.items
del dlg
return (selection, items)
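# Illustrative sketch (not part of the addon): a populator is expected to be a
# callable that yields lists of (source, links) pairs, one yield per finished
# task; the plugin:// paths below are hypothetical.
def _example_select_ext_usage():
    def populator():
        yield [("Source A", [{'label': 'Link 1', 'path': 'plugin://a/1'}])]
        yield [("Source B", [{'label': 'Link 2', 'path': 'plugin://b/2'}])]
    return select_ext("Choose a stream", populator, tasks_count=2)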
class FanArtWindow(xbmcgui.WindowDialog):
def __init__(self):
control_background = xbmcgui.ControlImage(0, 0, 1280, 720, xbmcaddon.Addon().getAddonInfo('fanart'))
self.addControl(control_background)
fanart = xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')
if fanart and fanart != "Fanart_Image":
control_fanart = xbmcgui.ControlImage(0, 0, 1280, 720, fanart)
self.addControl(control_fanart)
class ExtendedDialogHacks(object):
def __init__(self):
self.active = False
self.hide_progress = False
self.hide_info = False
self.autohidedialogs = False
if self.autohidedialogs:
self.hide_progress = False
self.hide_info = False
if not self.hide_progress and not self.hide_info:
self.autohidedialogs = False
def __enter__(self):
self.active = True
# self.numeric_keyboard = None
self.fanart_window = FanArtWindow()
## Keyboard hack
# if plugin.get_setting(SETTING_ADVANCED_KEYBOARD_HACKS, converter=bool):
# self.numeric_keyboard = xbmcgui.Window(10109)
# Thread(target = lambda: self.numeric_keyboard.show()).start()
# wait_for_dialog('numericinput', interval=50)
# Show fanart background
self.fanart_window.show()
# Run background task
if self.autohidedialogs:
Thread(target=self.background_task).start()
def background_task(self):
xbmc.sleep(1000)
while not xbmc.abortRequested and self.active:
if self.hide_progress:
active_window = xbmcgui.getCurrentWindowDialogId()
if active_window in [10101, 10151]:
xbmc.executebuiltin("Dialog.Close(%d, true)" % active_window)
if self.hide_info:
if xbmc.getCondVisibility("Window.IsActive(infodialog)"):
xbmc.executebuiltin('Dialog.Close(infodialog, true)')
xbmc.sleep(100)
def __exit__(self, exc_type, exc_value, traceback):
self.active = False
# if self.numeric_keyboard is not None:
# self.numeric_keyboard.close()
# del self.numeric_keyboard
# xbmc.executebuiltin("Dialog.Close(numericinput, true)")
self.fanart_window.close()
del self.fanart_window
class SelectorDialog(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
self.title = kwargs['title']
self.populator = kwargs['populator']
self.steps = kwargs['steps']
self.sort_function = kwargs['sort_function']
self.items = []
self.selection = None
self.insideIndex = -1
self.completed_steps = 0
self.thread = None
self.lock = RLock()
def get_selection(self):
""" get final selection """
return self.selection
def onInit(self):
# set title
self.label = self.getControl(1)
self.label.setLabel(self.title)
# Hide ok button
self.getControl(5).setVisible(False)
# Get active list
try:
self.list = self.getControl(6)
self.list.controlLeft(self.list)
self.list.controlRight(self.list)
self.getControl(3).setVisible(False)
except:
self.list = self.getControl(3)
self.setFocus(self.list)
# populate list
self.thread = Thread(target=self._populate)
self.thread.start()
def onAction(self, action):
if action.getId() in (9, 10, 92, 216, 247, 257, 275, 61467, 61448,):
if self.insideIndex == -1:
self.close()
else:
self._inside_root(select=self.insideIndex)
def onClick(self, controlID):
if controlID == 6 or controlID == 3:
num = self.list.getSelectedPosition()
if num >= 0:
if self.insideIndex == -1:
self._inside(num)
else:
self.selection = self.items[self.insideIndex][1][num]
self.close()
def onFocus(self, controlID):
if controlID in (3, 61):
self.setFocus(self.list)
def _inside_root(self, select=-1):
with self.lock:
self.list.reset()
for source, links in self.items:
if len(links) > 1:
source += " >>"
listitem = xbmcgui.ListItem(source)
try:
#icon = xbmcaddon.Addon(id=links[0]['path'].split("/")[2]).getAddonInfo('icon')
icon = xbmcaddon.Addon().getAddonInfo('icon')
listitem.setIconImage(icon)
except:
pass
self.list.addItem(listitem)
if select >= 0:
self.list.selectItem(select)
self.insideIndex = -1
def _inside(self, num):
if num == -1:
self._inside_root(select=self.insideIndex)
return
with self.lock:
source, links = self.items[num]
if len(links) == 1:
self.selection = links[0]
self.close()
return
self.list.reset()
for item in links:
listitem = xbmcgui.ListItem(item['label'])
#listitem.setProperty("Path", item['path'])
try:
#pluginid = item['path'].split("/")[2]
icon = xbmcaddon.Addon().getAddonInfo('icon')
listitem.setIconImage(icon)
except:
pass
self.list.addItem(listitem)
self.insideIndex = num
def step(self):
self.completed_steps += 1
        progress = self.completed_steps * 100 // self.steps
self.label.setLabel(u"{0} - {1:d}% ({2}/{3})".format(self.title, progress,
self.completed_steps, self.steps))
def _populate(self):
xbmc.sleep(500) # Delay population to let ui settle
self.label.setLabel(self.title)
try:
for result in self.populator():
self.step()
if not result:
continue
with self.lock:
# Remember selected item
selectedItem = None
if self.insideIndex == -1:
selectedIndex = self.list.getSelectedPosition()
else:
selectedIndex = self.insideIndex
if selectedIndex >= 0:
selectedItem = self.items[selectedIndex]
# Add new item
self.items.extend(result)
if self.sort_function:
self.items = sorted(self.items, key = self.sort_function)
#self.items.sort()
                    # Retrieve new selection index
if selectedItem is not None:
selectedIndex = self.items.index(selectedItem)
if self.insideIndex != -1:
self.insideIndex = selectedIndex
# Update only if in root
if self.insideIndex == -1:
self._inside_root(select=selectedIndex)
self.setFocus(self.list)
except:
self.label.setLabel(
u"{0} - {1:d}% ({2}/{3})".format(self.title, 100,
self.steps, self.steps))
pass
|
psdns.py
|
"""
Functions for resolving hostnames and IPs
"""
import dns.reversename
import dns.resolver
import multiprocessing
import multiprocessing.dummy
import os
import Queue
import socket
import threading
# See Python 2.6 workaround below.
import weakref
__DEFAULT_TIMEOUT__ = 2
def dns_default_timeout():
return __DEFAULT_TIMEOUT__
def __check_ip_version__(ip_version):
if not ip_version in [4, 6]:
raise ValueError("Invalid IP version; must be 4 or 6")
#
# Single Resolution
#
def __dns_resolve_host(host, ip_version, timeout):
"""
Resolve a host using the system's facilities
"""
family = socket.AF_INET if ip_version == 4 else socket.AF_INET6
def proc(host, family, queue):
try:
queue.put(socket.getaddrinfo(host, 0, family))
except socket.gaierror as ex:
# TODO: Would be nice if we could isolate just the not
# found error.
queue.put([])
except socket.timeout:
# Don't care, we just want the queue to be empty if
# there's an error.
pass
queue = Queue.Queue()
thread = threading.Thread(target=proc, args=(host, family, queue))
thread.setDaemon(True)
thread.start()
try:
results = queue.get(True, timeout)
if len(results) == 0:
return None
family, socktype, proto, canonname, sockaddr = results[0]
except Queue.Empty:
return None
    # NOTE: Don't make any attempt to kill the thread, as it will get
    # Python all confused if it holds the GIL.
    # sockaddr is an (address, port, ...) tuple; keep only the address.
    return str(sockaddr[0])
def dns_resolve(host,
query=None,
ip_version=4,
timeout=__DEFAULT_TIMEOUT__
):
"""
Resolve a hostname to its A record, returning None if not found or
there was a timeout.
"""
__check_ip_version__(ip_version)
if query is None:
# The default query is for a host,
return __dns_resolve_host(host, ip_version, timeout)
else:
# Any other explicit query value is forced to use DNS.
try:
resolver = dns.resolver.Resolver()
resolver.timeout = timeout
resolver.lifetime = timeout
if query is None:
query = 'A' if ip_version == 4 else 'AAAA'
answers = resolver.query(host, query)
except (dns.exception.Timeout,
dns.name.EmptyLabel,
dns.resolver.NXDOMAIN,
dns.resolver.NoAnswer,
dns.resolver.NoNameservers):
return None
return str(answers[0])
def dns_resolve_reverse(ip,
timeout=__DEFAULT_TIMEOUT__):
"""
Resolve an IP (v4 or v6) to its hostname, returning None if not
found or there was a timeout.
"""
"""
Reverse-resolve a host using the system's facilities
"""
# TODO: It would be nice of the queue/timeout code wasn't duplicated
# TODO: Validate 'ip' as an IP and raise a ValueError
def proc(ip_addr, queue):
"""Process the query"""
try:
queue.put(socket.gethostbyaddr(ip_addr)[0])
except socket.herror:
queue.put(None)
except socket.gaierror as ex:
if ex.errno != -2:
raise ex
queue.put(None)
queue = Queue.Queue()
thread = threading.Thread(target=proc, args=(ip, queue))
thread.setDaemon(True)
thread.start()
try:
return queue.get(True, timeout)
except Queue.Empty:
return None
# NOTE: Don't make any attempt to kill the thread, as it will get
# Python all confused if it holds the GIL.
#
# Bulk Resolution
#
def __forwarder__(arg):
"""
Query DNS for (name) and return (name, ip))
"""
host, ip_version = arg
return (host, dns_resolve(host, ip_version=ip_version))
def __reverser__(arg):
"""
Query reverse DNS for (ip) and return (ip, hostname)
"""
host, ip_version = arg
return (host, dns_resolve_reverse(host))
def dns_bulk_resolve(candidates, reverse=False, ip_version=None, threads=50):
"""
Resolve a list of host names to IPs or, if reverse is true, IPs to
host names. Return a map of each result keyed to its candidate.
WARNING: This function will create a pool of up to 'threads'
threads.
"""
# This is based loosely on http://stackoverflow.com/a/34377198
if reverse and ip_version is not None:
raise ValueError("Unable to force IP version when reverse-resolving")
if ip_version is None:
ip_version = 4
__check_ip_version__(ip_version)
result = {}
if len(candidates) == 0:
return result
# Work around a bug in 2.6
# TODO: Get rid of this when 2.6 is no longer in the picture.
if not hasattr(threading.current_thread(), "_children"):
threading.current_thread()._children = weakref.WeakKeyDictionary()
pool = multiprocessing.dummy.Pool(
processes=min(len(candidates), threads) )
candidate_args = [ (candidate, ip_version) for candidate in candidates ]
for ip, name in pool.imap(
__reverser__ if reverse else __forwarder__,
candidate_args,
chunksize=1):
result[ip] = name
pool.close()
return result
if __name__ == "__main__":
print "IPv4:"
print dns_resolve('localhost')
print dns_resolve('www.perfsonar.net', ip_version=4)
print dns_resolve('www.perfsonar.net', ip_version=4, query='SOA')
print dns_bulk_resolve([
'www.perfsonar.net',
'www.es.net',
'www.geant.org',
'www.iu.edu',
'www.internet2.edu',
'does-not-exist.internet2.edu',
], ip_version=4)
print
print "IPv6:"
print dns_resolve('www.perfsonar.net', ip_version=6)
print dns_bulk_resolve([
'www.perfsonar.net', 'www.google.com'
], ip_version=6)
print
print "Bulk reverse:"
print dns_bulk_resolve([
'127.0.0.1',
'::1',
'10.0.0.7',
'192.168.12.34',
'8.8.8.8',
'198.6.1.1',
'8.8.8.0',
'2607:f8b0:4002:c06::67',
'this-is-not-valid'
], reverse=True)
print
print "Bulk none:"
print dns_bulk_resolve([])
|
_server.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service-side implementation of gRPC Python."""
import collections
from concurrent import futures
import enum
import logging
import threading
import time
import grpc
from grpc import _common
from grpc import _compression
from grpc import _interceptor
from grpc._cython import cygrpc
import six
_LOGGER = logging.getLogger(__name__)
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
'send_initial_metadata * send_status_from_server')
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_DEALLOCATED_SERVER_CHECK_PERIOD_S = 1.0
_INF_TIMEOUT = 1e9
def _serialized_request(request_event):
return request_event.batch_operations[0].message()
def _application_code(code):
cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
def _completion_code(state):
if state.code is None:
return cygrpc.StatusCode.ok
else:
return _application_code(state.code)
def _abortion_code(state, code):
if state.code is None:
return code
else:
return _application_code(state.code)
def _details(state):
return b'' if state.details is None else state.details
class _HandlerCallDetails(
collections.namedtuple('_HandlerCallDetails', (
'method',
'invocation_metadata',
)), grpc.HandlerCallDetails):
pass
class _RPCState(object):
def __init__(self):
self.condition = threading.Condition()
self.due = set()
self.request = None
self.client = _OPEN
self.initial_metadata_allowed = True
self.compression_algorithm = None
self.disable_next_compression = False
self.trailing_metadata = None
self.code = None
self.details = None
self.statused = False
self.rpc_errors = []
self.callbacks = []
self.aborted = False
def _raise_rpc_error(state):
rpc_error = grpc.RpcError()
state.rpc_errors.append(rpc_error)
raise rpc_error
def _possibly_finish_call(state, token):
state.due.remove(token)
if not _is_rpc_state_active(state) and not state.due:
callbacks = state.callbacks
state.callbacks = None
return state, callbacks
else:
return None, ()
def _send_status_from_server(state, token):
def send_status_from_server(unused_send_status_from_server_event):
with state.condition:
return _possibly_finish_call(state, token)
return send_status_from_server
def _get_initial_metadata(state, metadata):
with state.condition:
if state.compression_algorithm:
compression_metadata = (
_compression.compression_algorithm_to_metadata(
state.compression_algorithm),)
if metadata is None:
return compression_metadata
else:
return compression_metadata + tuple(metadata)
else:
return metadata
def _get_initial_metadata_operation(state, metadata):
operation = cygrpc.SendInitialMetadataOperation(
_get_initial_metadata(state, metadata), _EMPTY_FLAGS)
return operation
def _abort(state, call, code, details):
if state.client is not _CANCELLED:
effective_code = _abortion_code(state, code)
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (
_get_initial_metadata_operation(state, None),
cygrpc.SendStatusFromServerOperation(state.trailing_metadata,
effective_code,
effective_details,
_EMPTY_FLAGS),
)
token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
else:
operations = (cygrpc.SendStatusFromServerOperation(
state.trailing_metadata, effective_code, effective_details,
_EMPTY_FLAGS),)
token = _SEND_STATUS_FROM_SERVER_TOKEN
call.start_server_batch(operations,
_send_status_from_server(state, token))
state.statused = True
state.due.add(token)
def _receive_close_on_server(state):
def receive_close_on_server(receive_close_on_server_event):
with state.condition:
if receive_close_on_server_event.batch_operations[0].cancelled():
state.client = _CANCELLED
elif state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
return receive_close_on_server
def _receive_message(state, call, request_deserializer):
def receive_message(receive_message_event):
serialized_request = _serialized_request(receive_message_event)
if serialized_request is None:
with state.condition:
if state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
else:
request = _common.deserialize(serialized_request,
request_deserializer)
with state.condition:
if request is None:
_abort(state, call, cygrpc.StatusCode.internal,
b'Exception deserializing request!')
else:
state.request = request
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
return receive_message
def _send_initial_metadata(state):
def send_initial_metadata(unused_send_initial_metadata_event):
with state.condition:
return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
return send_initial_metadata
def _send_message(state, token):
def send_message(unused_send_message_event):
with state.condition:
state.condition.notify_all()
return _possibly_finish_call(state, token)
return send_message
class _Context(grpc.ServicerContext):
def __init__(self, rpc_event, state, request_deserializer):
self._rpc_event = rpc_event
self._state = state
self._request_deserializer = request_deserializer
def is_active(self):
with self._state.condition:
return _is_rpc_state_active(self._state)
def time_remaining(self):
return max(self._rpc_event.call_details.deadline - time.time(), 0)
def cancel(self):
self._rpc_event.call.cancel()
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def disable_next_message_compression(self):
with self._state.condition:
self._state.disable_next_compression = True
def invocation_metadata(self):
return self._rpc_event.invocation_metadata
def peer(self):
return _common.decode(self._rpc_event.call.peer())
def peer_identities(self):
return cygrpc.peer_identities(self._rpc_event.call)
def peer_identity_key(self):
id_key = cygrpc.peer_identity_key(self._rpc_event.call)
return id_key if id_key is None else _common.decode(id_key)
def auth_context(self):
return {
_common.decode(key): value for key, value in six.iteritems(
cygrpc.auth_context(self._rpc_event.call))
}
def set_compression(self, compression):
with self._state.condition:
self._state.compression_algorithm = compression
def send_initial_metadata(self, initial_metadata):
with self._state.condition:
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
else:
if self._state.initial_metadata_allowed:
operation = _get_initial_metadata_operation(
self._state, initial_metadata)
self._rpc_event.call.start_server_batch(
(operation,), _send_initial_metadata(self._state))
self._state.initial_metadata_allowed = False
self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
else:
raise ValueError('Initial metadata no longer allowed!')
def set_trailing_metadata(self, trailing_metadata):
with self._state.condition:
self._state.trailing_metadata = trailing_metadata
def trailing_metadata(self):
return self._state.trailing_metadata
def abort(self, code, details):
# treat OK like other invalid arguments: fail the RPC
if code == grpc.StatusCode.OK:
_LOGGER.error(
'abort() called with StatusCode.OK; returning UNKNOWN')
code = grpc.StatusCode.UNKNOWN
details = ''
with self._state.condition:
self._state.code = code
self._state.details = _common.encode(details)
self._state.aborted = True
raise Exception()
def abort_with_status(self, status):
self._state.trailing_metadata = status.trailing_metadata
self.abort(status.code, status.details)
def set_code(self, code):
with self._state.condition:
self._state.code = code
def code(self):
return self._state.code
def set_details(self, details):
with self._state.condition:
self._state.details = _common.encode(details)
def details(self):
return self._state.details
def _finalize_state(self):
pass
class _RequestIterator(object):
def __init__(self, state, call, request_deserializer):
self._state = state
self._call = call
self._request_deserializer = request_deserializer
def _raise_or_start_receive_message(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif not _is_rpc_state_active(self._state):
raise StopIteration()
else:
self._call.start_server_batch(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_receive_message(self._state, self._call,
self._request_deserializer))
self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
def _look_for_request(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif (self._state.request is None and
_RECEIVE_MESSAGE_TOKEN not in self._state.due):
raise StopIteration()
else:
request = self._state.request
self._state.request = None
return request
raise AssertionError() # should never run
def _next(self):
with self._state.condition:
self._raise_or_start_receive_message()
while True:
self._state.condition.wait()
request = self._look_for_request()
if request is not None:
return request
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def _unary_request(rpc_event, state, request_deserializer):
def unary_request():
with state.condition:
if not _is_rpc_state_active(state):
return None
else:
rpc_event.call.start_server_batch(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_receive_message(state, rpc_event.call,
request_deserializer))
state.due.add(_RECEIVE_MESSAGE_TOKEN)
while True:
state.condition.wait()
if state.request is None:
if state.client is _CLOSED:
details = '"{}" requires exactly one request message.'.format(
rpc_event.call_details.method)
_abort(state, rpc_event.call,
cygrpc.StatusCode.unimplemented,
_common.encode(details))
return None
elif state.client is _CANCELLED:
return None
else:
request = state.request
state.request = None
return request
return unary_request
def _call_behavior(rpc_event,
state,
behavior,
argument,
request_deserializer,
send_response_callback=None):
from grpc import _create_servicer_context
with _create_servicer_context(rpc_event, state,
request_deserializer) as context:
try:
response_or_iterator = None
if send_response_callback is not None:
response_or_iterator = behavior(argument, context,
send_response_callback)
else:
response_or_iterator = behavior(argument, context)
return response_or_iterator, True
except Exception as exception: # pylint: disable=broad-except
with state.condition:
if state.aborted:
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
b'RPC Aborted')
elif exception not in state.rpc_errors:
details = 'Exception calling application: {}'.format(
exception)
_LOGGER.exception(details)
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
_common.encode(details))
return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return next(response_iterator), True
except StopIteration:
return None, True
except Exception as exception: # pylint: disable=broad-except
with state.condition:
if state.aborted:
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
b'RPC Aborted')
elif exception not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(exception)
_LOGGER.exception(details)
_abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
_common.encode(details))
return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
serialized_response = _common.serialize(response, response_serializer)
if serialized_response is None:
with state.condition:
_abort(state, rpc_event.call, cygrpc.StatusCode.internal,
b'Failed to serialize response!')
return None
else:
return serialized_response
def _get_send_message_op_flags_from_state(state):
if state.disable_next_compression:
return cygrpc.WriteFlag.no_compress
else:
return _EMPTY_FLAGS
def _reset_per_message_state(state):
with state.condition:
state.disable_next_compression = False
def _send_response(rpc_event, state, serialized_response):
with state.condition:
if not _is_rpc_state_active(state):
return False
else:
if state.initial_metadata_allowed:
operations = (
_get_initial_metadata_operation(state, None),
cygrpc.SendMessageOperation(
serialized_response,
_get_send_message_op_flags_from_state(state)),
)
state.initial_metadata_allowed = False
token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
else:
operations = (cygrpc.SendMessageOperation(
serialized_response,
_get_send_message_op_flags_from_state(state)),)
token = _SEND_MESSAGE_TOKEN
rpc_event.call.start_server_batch(operations,
_send_message(state, token))
state.due.add(token)
_reset_per_message_state(state)
while True:
state.condition.wait()
if token not in state.due:
return _is_rpc_state_active(state)
def _status(rpc_event, state, serialized_response):
with state.condition:
if state.client is not _CANCELLED:
code = _completion_code(state)
details = _details(state)
operations = [
cygrpc.SendStatusFromServerOperation(state.trailing_metadata,
code, details,
_EMPTY_FLAGS),
]
if state.initial_metadata_allowed:
operations.append(_get_initial_metadata_operation(state, None))
if serialized_response is not None:
operations.append(
cygrpc.SendMessageOperation(
serialized_response,
_get_send_message_op_flags_from_state(state)))
rpc_event.call.start_server_batch(
operations,
_send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
state.statused = True
_reset_per_message_state(state)
state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
cygrpc.install_context_from_request_call_event(rpc_event)
try:
argument = argument_thunk()
if argument is not None:
response, proceed = _call_behavior(rpc_event, state, behavior,
argument, request_deserializer)
if proceed:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
finally:
cygrpc.uninstall_context()
def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
cygrpc.install_context_from_request_call_event(rpc_event)
def send_response(response):
if response is None:
_status(rpc_event, state, None)
else:
serialized_response = _serialize_response(rpc_event, state,
response,
response_serializer)
if serialized_response is not None:
_send_response(rpc_event, state, serialized_response)
try:
argument = argument_thunk()
if argument is not None:
if hasattr(behavior, 'experimental_non_blocking'
) and behavior.experimental_non_blocking:
_call_behavior(rpc_event,
state,
behavior,
argument,
request_deserializer,
send_response_callback=send_response)
else:
response_iterator, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
_send_message_callback_to_blocking_iterator_adapter(
rpc_event, state, send_response, response_iterator)
finally:
cygrpc.uninstall_context()
def _is_rpc_state_active(state):
return state.client is not _CANCELLED and not state.statused
def _send_message_callback_to_blocking_iterator_adapter(rpc_event, state,
send_response_callback,
response_iterator):
while True:
response, proceed = _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
send_response_callback(response)
if not _is_rpc_state_active(state):
break
else:
break
def _select_thread_pool_for_behavior(behavior, default_thread_pool):
if hasattr(behavior, 'experimental_thread_pool') and isinstance(
behavior.experimental_thread_pool, futures.ThreadPoolExecutor):
return behavior.experimental_thread_pool
else:
return default_thread_pool
def _handle_unary_unary(rpc_event, state, method_handler, default_thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
thread_pool = _select_thread_pool_for_behavior(method_handler.unary_unary,
default_thread_pool)
return thread_pool.submit(_unary_response_in_pool, rpc_event, state,
method_handler.unary_unary, unary_request,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, default_thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
thread_pool = _select_thread_pool_for_behavior(method_handler.unary_stream,
default_thread_pool)
return thread_pool.submit(_stream_response_in_pool, rpc_event, state,
method_handler.unary_stream, unary_request,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, default_thread_pool):
request_iterator = _RequestIterator(state, rpc_event.call,
method_handler.request_deserializer)
thread_pool = _select_thread_pool_for_behavior(method_handler.stream_unary,
default_thread_pool)
return thread_pool.submit(_unary_response_in_pool, rpc_event, state,
method_handler.stream_unary,
lambda: request_iterator,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler,
default_thread_pool):
request_iterator = _RequestIterator(state, rpc_event.call,
method_handler.request_deserializer)
thread_pool = _select_thread_pool_for_behavior(method_handler.stream_stream,
default_thread_pool)
return thread_pool.submit(_stream_response_in_pool, rpc_event, state,
method_handler.stream_stream,
lambda: request_iterator,
method_handler.request_deserializer,
method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers, interceptor_pipeline):
def query_handlers(handler_call_details):
for generic_handler in generic_handlers:
method_handler = generic_handler.service(handler_call_details)
if method_handler is not None:
return method_handler
return None
handler_call_details = _HandlerCallDetails(
_common.decode(rpc_event.call_details.method),
rpc_event.invocation_metadata)
if interceptor_pipeline is not None:
return interceptor_pipeline.execute(query_handlers,
handler_call_details)
else:
return query_handlers(handler_call_details)
def _reject_rpc(rpc_event, status, details):
rpc_state = _RPCState()
operations = (
_get_initial_metadata_operation(rpc_state, None),
cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
cygrpc.SendStatusFromServerOperation(None, status, details,
_EMPTY_FLAGS),
)
rpc_event.call.start_server_batch(operations, lambda ignored_event: (
rpc_state,
(),
))
return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state = _RPCState()
with state.condition:
rpc_event.call.start_server_batch(
(cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
_receive_close_on_server(state))
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
if method_handler.response_streaming:
return state, _handle_stream_stream(rpc_event, state,
method_handler, thread_pool)
else:
return state, _handle_stream_unary(rpc_event, state,
method_handler, thread_pool)
else:
if method_handler.response_streaming:
return state, _handle_unary_stream(rpc_event, state,
method_handler, thread_pool)
else:
return state, _handle_unary_unary(rpc_event, state,
method_handler, thread_pool)
def _handle_call(rpc_event, generic_handlers, interceptor_pipeline, thread_pool,
concurrency_exceeded):
if not rpc_event.success:
return None, None
if rpc_event.call_details.method is not None:
try:
method_handler = _find_method_handler(rpc_event, generic_handlers,
interceptor_pipeline)
except Exception as exception: # pylint: disable=broad-except
details = 'Exception servicing handler: {}'.format(exception)
_LOGGER.exception(details)
return _reject_rpc(rpc_event, cygrpc.StatusCode.unknown,
b'Error in service handler!'), None
if method_handler is None:
return _reject_rpc(rpc_event, cygrpc.StatusCode.unimplemented,
b'Method not found!'), None
elif concurrency_exceeded:
return _reject_rpc(rpc_event, cygrpc.StatusCode.resource_exhausted,
b'Concurrent RPC limit exceeded!'), None
else:
return _handle_with_method_handler(rpc_event, method_handler,
thread_pool)
else:
return None, None
@enum.unique
class _ServerStage(enum.Enum):
STOPPED = 'stopped'
STARTED = 'started'
GRACE = 'grace'
class _ServerState(object):
# pylint: disable=too-many-arguments
def __init__(self, completion_queue, server, generic_handlers,
interceptor_pipeline, thread_pool, maximum_concurrent_rpcs):
self.lock = threading.RLock()
self.completion_queue = completion_queue
self.server = server
self.generic_handlers = list(generic_handlers)
self.interceptor_pipeline = interceptor_pipeline
self.thread_pool = thread_pool
self.stage = _ServerStage.STOPPED
self.termination_event = threading.Event()
self.shutdown_events = [self.termination_event]
self.maximum_concurrent_rpcs = maximum_concurrent_rpcs
self.active_rpc_count = 0
# TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
self.rpc_states = set()
self.due = set()
# A "volatile" flag to interrupt the daemon serving thread
self.server_deallocated = False
def _add_generic_handlers(state, generic_handlers):
with state.lock:
state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
with state.lock:
return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
with state.lock:
return state.server.add_http2_port(address,
server_credentials._credentials)
def _request_call(state):
state.server.request_call(state.completion_queue, state.completion_queue,
_REQUEST_CALL_TAG)
state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
if not state.rpc_states and not state.due:
state.server.destroy()
for shutdown_event in state.shutdown_events:
shutdown_event.set()
state.stage = _ServerStage.STOPPED
return True
else:
return False
def _on_call_completed(state):
with state.lock:
state.active_rpc_count -= 1
def _process_event_and_continue(state, event):
should_continue = True
if event.tag is _SHUTDOWN_TAG:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
if _stop_serving(state):
should_continue = False
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
concurrency_exceeded = (
state.maximum_concurrent_rpcs is not None and
state.active_rpc_count >= state.maximum_concurrent_rpcs)
rpc_state, rpc_future = _handle_call(event, state.generic_handlers,
state.interceptor_pipeline,
state.thread_pool,
concurrency_exceeded)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if rpc_future is not None:
state.active_rpc_count += 1
rpc_future.add_done_callback(
lambda unused_future: _on_call_completed(state))
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
should_continue = False
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
try:
callback()
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
should_continue = False
return should_continue
def _serve(state):
while True:
timeout = time.time() + _DEALLOCATED_SERVER_CHECK_PERIOD_S
event = state.completion_queue.poll(timeout)
if state.server_deallocated:
_begin_shutdown_once(state)
if event.completion_type != cygrpc.CompletionType.queue_timeout:
if not _process_event_and_continue(state, event):
return
# We want to force the deletion of the previous event
# ~before~ we poll again; if the event has a reference
# to a shutdown Call object, this can induce spinlock.
event = None
def _begin_shutdown_once(state):
with state.lock:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.due.add(_SHUTDOWN_TAG)
def _stop(state, grace):
with state.lock:
if state.stage is _ServerStage.STOPPED:
shutdown_event = threading.Event()
shutdown_event.set()
return shutdown_event
else:
_begin_shutdown_once(state)
shutdown_event = threading.Event()
state.shutdown_events.append(shutdown_event)
if grace is None:
state.server.cancel_all_calls()
else:
def cancel_all_calls_after_grace():
shutdown_event.wait(timeout=grace)
with state.lock:
state.server.cancel_all_calls()
thread = threading.Thread(target=cancel_all_calls_after_grace)
thread.start()
return shutdown_event
shutdown_event.wait()
return shutdown_event
def _start(state):
with state.lock:
if state.stage is not _ServerStage.STOPPED:
raise ValueError('Cannot start already-started server!')
state.server.start()
state.stage = _ServerStage.STARTED
_request_call(state)
thread = threading.Thread(target=_serve, args=(state,))
thread.daemon = True
thread.start()
def _validate_generic_rpc_handlers(generic_rpc_handlers):
for generic_rpc_handler in generic_rpc_handlers:
service_attribute = getattr(generic_rpc_handler, 'service', None)
if service_attribute is None:
raise AttributeError(
'"{}" must conform to grpc.GenericRpcHandler type but does '
'not have "service" method!'.format(generic_rpc_handler))
def _augment_options(base_options, compression):
compression_option = _compression.create_channel_option(compression)
return tuple(base_options) + compression_option
class _Server(grpc.Server):
# pylint: disable=too-many-arguments
def __init__(self, thread_pool, generic_handlers, interceptors, options,
maximum_concurrent_rpcs, compression, xds):
completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server(_augment_options(options, compression), xds)
server.register_completion_queue(completion_queue)
self._state = _ServerState(completion_queue, server, generic_handlers,
_interceptor.service_pipeline(interceptors),
thread_pool, maximum_concurrent_rpcs)
def add_generic_rpc_handlers(self, generic_rpc_handlers):
_validate_generic_rpc_handlers(generic_rpc_handlers)
_add_generic_handlers(self._state, generic_rpc_handlers)
def add_insecure_port(self, address):
return _common.validate_port_binding_result(
address, _add_insecure_port(self._state, _common.encode(address)))
def add_secure_port(self, address, server_credentials):
return _common.validate_port_binding_result(
address,
_add_secure_port(self._state, _common.encode(address),
server_credentials))
def start(self):
_start(self._state)
def wait_for_termination(self, timeout=None):
# NOTE(https://bugs.python.org/issue35935)
# Remove this workaround once threading.Event.wait() is working with
# CTRL+C across platforms.
return _common.wait(self._state.termination_event.wait,
self._state.termination_event.is_set,
timeout=timeout)
def stop(self, grace):
return _stop(self._state, grace)
def __del__(self):
if hasattr(self, '_state'):
# We can not grab a lock in __del__(), so set a flag to signal the
# serving daemon thread (if it exists) to initiate shutdown.
self._state.server_deallocated = True
def create_server(thread_pool, generic_rpc_handlers, interceptors, options,
maximum_concurrent_rpcs, compression, xds):
_validate_generic_rpc_handlers(generic_rpc_handlers)
return _Server(thread_pool, generic_rpc_handlers, interceptors, options,
maximum_concurrent_rpcs, compression, xds)
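# Illustrative sketch (not part of gRPC): the public grpc.server() API ends up
# calling create_server() above. Handler registration and servicing are omitted.
def _example_start_and_stop_server():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    port = server.add_insecure_port('localhost:0')  # 0 lets the OS pick a free port
    server.start()
    server.stop(None)  # no grace period: cancel outstanding RPCs immediately
    return port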
|
braeker.py
|
import winsound
import time
import datetime
import re
import ctypes
import socket
import threading
from os.path import isfile
from ctypes import wintypes
user32 = ctypes.windll.user32
SW_SHOWMAXIMIZED = 3
SW_HIDE = 0
SW_MINIMIZE = 6
keybd_event = ctypes.windll.user32.keybd_event
alt_key = 0x12
extended_key = 0x0001
key_up = 0x0002
HOST = '127.0.0.1'
PORT = 51476
REFRESH_PERIOD = 1
HELP_STRING = '''
This program is designed to serve as a trigger that notifies the user \
that it's time to stand up, leave the workstation and have some rest.
The basic idea is that you enter how much time you plan to work; the console window disappears and shows up again after that time has passed. \
If you need to interrupt the program, just run another instance.
Examples of accepted inputs (no quotes)
"120" - 120 seconds
"15m" - 15 minutes
"2h" - 2 hours
"q" - show stats and quit
"?" - display this help
'''
hwnd = user32.GetForegroundWindow()
period = None
start = None
parserPeriod = re.compile(r'(\d+)(\w?)')
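# A quick sketch (illustrative, not part of the original flow) of how the regex plus
# the suffix coefficient in the main loop turn user input into a period in seconds:
#   parserPeriod.search('120').groups() -> ('120', '')  -> 120 * 1   = 120 seconds
#   parserPeriod.search('15m').groups() -> ('15', 'm')  -> 15 * 60   = 900 seconds
#   parserPeriod.search('2h').groups()  -> ('2', 'h')   -> 2 * 3600  = 7200 seconds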
showUpInterruption = False
workTime = datetime.datetime.now() - datetime.datetime.now()
def ListenToConnection():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind((HOST, PORT))
except OSError:
s.connect((HOST, PORT))
print(f'Unexpected bind error. Exiting!')
exit()
s.listen(2)
while True:
conn = s.accept()[0]
conn.close()
global showUpInterruption
showUpInterruption = True
if __name__ == '__main__':
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind((HOST, PORT))
except OSError as err:
s.connect((HOST, PORT))
exit()
time.sleep(0.5)
listenerThread = threading.Thread(target=ListenToConnection, daemon=True)
listenerThread.start()
def Interrupt():
global start
global showUpInterruption
global workTime
workTime = workTime + (datetime.datetime.now() - start)
start = None
showUpInterruption = False
    user32.ShowWindow(hwnd, SW_SHOWMAXIMIZED)
    # Steal focus: emulate an Alt key press/release to bypass the Windows foreground-window 'security'
    keybd_event(alt_key, 0, extended_key | 0, 0)
    user32.SetForegroundWindow(hwnd)
    keybd_event(alt_key, 0, extended_key | key_up, 0)
while True:
if not start:
unparsedPeriod = input(
f'[{datetime.datetime.now()}] $ ')
if 'q' in unparsedPeriod:
break
if '?' in unparsedPeriod:
print(HELP_STRING)
continue
match = parserPeriod.search(unparsedPeriod)
if not match:
print('Input is not valid! Type "?" for help. ')
continue
value = int(match.group(1))
suffix = match.group(2)
coef = 1
if suffix == 'm' or suffix == 'м':
coef = 60
elif suffix == 'h' or suffix == 'ч':
coef = 3600
period = value*coef
start = datetime.datetime.now()
user32.ShowWindow(hwnd, SW_HIDE)
showUpInterruption = False
elif (datetime.datetime.now() - start).total_seconds() > period:
if isfile('brokenGlass.wav'):
winsound.PlaySound('brokenGlass.wav',
winsound.SND_FILENAME | winsound.SND_ASYNC)
Interrupt()
else:
time.sleep(REFRESH_PERIOD)
if showUpInterruption:
Interrupt()
minutes = workTime.total_seconds()/60
hours = workTime.total_seconds()/3600
input(f'\nTotal worktime: {minutes:.1f} minutes, or approximately {hours:.1f} hours\nPress Enter to exit.')
|
core.py
|
# -*- coding: utf-8 -*-
# author: Григорий Никониров
# Gregoriy Nikonirov
# email: mrgbh007@gmail.com
#
#~ import GUI.core as gg
from GUI.core import *
from GUI.core import _TEXTURIES
from MGD.item import Inventory,Item,_ITEM_CLASS,_ITEM_TYPES
import glob
import random
import time
import threading
def test1():
s=GUICore()
#~ s=GUICore(100,50)
s.mainloop()
def test2():
s=GUICore()
#~ tl=s._GUICore__textures.getTextureList()
tl=_TEXTURIES.getTextureList()
for i,e in enumerate(tl):
s.placeTexture(e,i%10*50,i//10*50,str(i))
    threading.Thread(target=test2_1, args=(s, tl)).start()  # run the delayed texture removal in a background thread
s.mainloop()
def test2_1(s,tl):
time.sleep(2)
for i,e in enumerate(tl):
s.removeByTag(str(i))
time.sleep(1)
def test3():
s=GUICore()
a=AskFrame('lol?',lambda:print('ok'),lambda:print('no'))
s.placeWidget(a,50,50)
s.mainloop()
def test4():
s=GUICore()
b=Inventory([123,12,312,3,3,42,4])
a=InventoryFrame(b)
s.placeWidget(a,10,10)
s.mainloop()
def test5():
s=GUICore()
b=Item()
a=ItemShortInfoFrame(b)
s.placeWidget(a,10,10)
s.mainloop()
def test6():
s=GUICore()
b=Inventory()
for i in range(30):
b.add(Item(str(i),random.choice(_ITEM_TYPES),random.choice(_ITEM_CLASS)))
a=InventoryFrame(b)
s.placeWidget(a,10,10)
c=EntityInfoFrame(1)
s.placeWidget(c,300,10)
s.mainloop()
def test7():
s=GUICore()
a=EntityInfoFrame(1)
s.placeWidget(a,10,10)
s.mainloop()
|
testthread.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by vici on 22/01/2017
import threading
import platform
import time
import logging
from Queue import Queue
from random import randint
__doc__ = """ 抄董伟明,官方文档,廖雪峰 bogo 。。。。 threading 实验结果记录
"""
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) |||| %(message)s',)
def timeit(func):
"""一个简单的函数时间装饰器"""
def wrapper(*args, **kwargs):
import time
start = time.time()
func(*args, **kwargs)
end = time.time()
print('{} COST: {}'.format(func.__name__, end-start))
return wrapper
def show_thread_itself(arg):
print("threading {} is running! ".format(threading.current_thread().getName()))
for i in xrange(5):
print('Thread {} >>> {}'.format(threading.current_thread().getName(), i))
time.sleep(1)
print("Thread {} end".format(threading.current_thread().getName()))
pass
def fib(n):
if n <= 2:
return 1
return fib(n-1) + fib(n-2)
@timeit
def nothread():
fib(34)
fib(34)
@timeit
def withthread():
for i in xrange(2):
t = threading.Thread(target=fib, args=(34,))
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
balance = 1000
lock = threading.Lock()
def change_balance():
global balance
balance += 1000
balance -= 1000
def money_laundering(num):
for i in xrange(num):
change_balance()
def money_laundering_priveate(num):
for i in xrange(num):
lock.acquire()
try:
change_balance()
finally:
lock.release()
def lock_is_important():
"""
这里的lock是一种互斥的 可重获的 锁 在一个加锁thread进行完之后将会释放。
lock 相当于信号量(Sempahore) 为 1
"""
@timeit
def test1():
        print('Before I go to the bank, I have {}'.format(balance))
t1 = threading.Thread(target=money_laundering, args=(2000000,))
t2 = threading.Thread(target=money_laundering, args=(4000000,))
t1.start()
t2.start()
t1.join()
t2.join()
print("after two public_money_laundering, I have {}".format(balance))
@timeit
def test2():
global balance
balance = 2000
        print('Before I go to a new bank, I have {}'.format(balance))
t3 = threading.Thread(target=money_laundering_priveate, args=(3000000,))
t4 = threading.Thread(target=money_laundering_priveate, args=(3000000,))
t3.start()
t4.start()
t3.join()
t4.join()
print("after two private money_launderingm I have {}".format(balance))
test1()
test2()
def consumer_producer():
condition = threading.Condition()
    def consumer(cond):
        t = threading.current_thread()
        print('{} started, waiting for the producer'.format(t.name))
        with cond:
            cond.wait()
            print('resource is now available to {}'.format(t.name))
    def producer(cond):
        t = threading.current_thread()
        with cond:
            print('{} making the resource available!'.format(t.name))
            cond.notify_all()  # wake up the waiting consumers
c1 = threading.Thread(name='cons1', target=consumer, args=(condition,))
c2 = threading.Thread(name='cons2', target=consumer, args=(condition,))
p = threading.Thread(name='prod', target=producer, args=(condition,))
c1.start()
time.sleep(1)
c2.start()
time.sleep(1)
p.start()
def consumber_producer_event():
"""一个线程发送事件,其他的线程等待事件的触发 生产者消费者模型"""
from random import randint
TIMEOUT = 2
eve = threading.Event()
ll = []
threads = []
def consumer(event, l):
"""消费者"""
mt = threading.currentThread()
while 1:
event_is_set = event.wait(TIMEOUT)
if event_is_set:
try:
integer = l.pop()
print('{} pop from list by {}'.format(integer, mt.name))
                    event.clear()  # reset the event state
                except IndexError:
                    pass  # tolerate an empty list right after startup
def producer(event, l):
mt = threading.currentThread()
while 1:
integer = randint(10, 100)
l.append(integer)
print('{} is append to list by {}'.format(integer, mt.name))
event.set()
time.sleep(1)
pass
for name in ('consumer1', 'consumer2'):
t = threading.Thread(name=name, target=consumer, args=(eve, ll))
t.start()
threads.append(t)
p = threading.Thread(name='producer', target=producer, args=(eve, ll))
p.start()
threads.append(p)
for t in threads:
t.join()
pass
def consumer_producer_queue():
""" 有两种模式 priority 优先级模式 LIFOqueue后进先出模式。"""
# priority 模式
from random import random
from Queue import PriorityQueue, LifoQueue
q = PriorityQueue()
def double(num):
return num * 2
def producer():
while 1:
wt = random()
time.sleep(1)
print('put', wt)
q.put((double, wt))
def consumer():
while 1:
task, arg = q.get()
            print('{} {}'.format(arg, task(arg)))
q.task_done()
for target in (producer, consumer):
t = threading.Thread(target=target)
t.start()
def consumer_producer_priqueue():
"""priority 优先级队列"""
from random import randint
from Queue import PriorityQueue
pri_q = PriorityQueue()
def triple(n):
return n * 3
def consumer():
while 1:
if pri_q.empty():
break
pri, target, arg = pri_q.get()
print('[PRI: {}], {} * 3 = {}'.format(pri, arg, target(arg)))
pri_q.task_done()
time.sleep(1)
pass
def producer():
count = 0
while 1:
if count > 50:
break
pri = randint(10, 100)
print('put priority {} '.format(pri))
pri_q.put((pri, triple, pri))
count += 1
pass
for targ in (producer, consumer):
t = threading.Thread(target=targ)
t.start()
time.sleep(1)
def daemon_and_not_daemon():
def nd():
logging.debug("start!")
time.sleep(6)
logging.debug("end!")
def d():
logging.debug("start")
time.sleep(3)
logging.debug("end")
t = threading.Thread(target=d, name="deamon")
nt = threading.Thread(target=nd, name='no-deamon')
t.setDaemon(True)
t.start()
nt.start()
# 论join 的重要性。
t.join()
# threading pool && threading module programing
def quadra(strings):
return str(strings) * 4
class Worker(threading.Thread):
def __init__(self, queue):
super(Worker, self).__init__()
self._q = queue
self.daemon = True
self.start()
def run(self):
while 1:
f, args, kwargs = self._q.get()
try:
print('USE {} '.format(self.name))
print(f(*args, **kwargs))
except Exception as e:
                print(e)
self._q.task_done()
pass
class ThreadingPool(object):
def __init__(self, num_con=5):
self._q = Queue(num_con)
for _ in xrange(num_con):
Worker(self._q)
def add_task(self, f, *args, **kwargs):
self._q.put((f, args, kwargs))
def wait_complete(self):
self._q.join()
pass
def test_threading_pool():
pool = ThreadingPool(10)
for _ in xrange(1000):
wt = randint(1, 9)
pool.add_task(quadra, wt)
time.sleep(1)
pool.wait_complete()
def main():
# nothread()
# withthread()
    # ----------------- no threading vs. using threading -------------------------
    # show_thread_itself('no multi')
    # print('threading is running! thread name is {}'.format(threading.current_thread().getName()))
    # t = threading.Thread(target=show_thread_itself, args=(123,), name='Do yourself')
    # t.start()
    # t.join()
    # print('threading {} end.'.format(threading.current_thread().getName()))
    # ------------------ problem of thread locking ---------------------------
    # lock_is_important()
    # ------------------ consumer/producer model with Condition -----------------------
    # consumer_producer()
    # ------------------ consumer/producer model with Event -----------------
    # consumber_producer_event()
    # -------- daemon and non-daemon threads ------------------------
    # daemon_and_not_daemon()
    # ------------------ consumer/producer model with Queue -----------------------
    # consumer_producer_queue()  # plain queue
    # consumer_producer_priqueue()  # priority queue
    # ------------------ threading pool ------------
test_threading_pool()
pass
if __name__ == '__main__':
print("python version is {}".format(platform.python_version()))
main()
print("done!")
|
batcher.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to process data into batches"""
try:
import queue
except ImportError:
import Queue as queue
from random import shuffle
from random import seed
seed(123)
from threading import Thread
import time
import numpy as np
import tensorflow as tf
import data
FLAGS = tf.app.flags.FLAGS
class Example(object):
"""Class representing a train/val/test example for text summarization."""
def __init__(self, article, abstract_sentences, vocab, hps):
"""Initializes the Example, performing tokenization and truncation to produce the encoder, decoder and target sequences, which are stored in self.
Args:
article: source text; a string. each token is separated by a single space.
abstract_sentences: list of strings, one per abstract sentence. In each sentence, each token is separated by a single space.
vocab: Vocabulary object
hps: hyperparameters
"""
self.hps = hps
# Get ids of special tokens
start_decoding = vocab.word2id(data.START_DECODING)
stop_decoding = vocab.word2id(data.STOP_DECODING)
# Process the article
article_words = article.split()
    if len(article_words) > hps.max_enc_steps:  # TODO: only keep the first max_enc_steps (400) words of the article
article_words = article_words[:hps.max_enc_steps]
self.enc_len = len(article_words) # store the length after truncation but before padding
self.enc_input = [vocab.word2id(w) for w in article_words] # list of word ids; OOVs are represented by the id for UNK token
# Process the abstract
abstract = ' '.join(abstract_sentences) # string
abstract_words = abstract.split() # list of strings
abs_ids = [vocab.word2id(w) for w in abstract_words] # list of word ids; OOVs are represented by the id for UNK token
# Get the decoder input sequence and target sequence
self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, hps.max_dec_steps, start_decoding, stop_decoding)
self.dec_len = len(self.dec_input)
# If using pointer-generator mode, we need to store some extra info
    if hps.pointer_gen:  # TODO: defaults to True
# Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id; also store the in-article OOVs words themselves
self.enc_input_extend_vocab, self.article_oovs = data.article2ids(article_words, vocab)
# Get a verison of the reference summary where in-article OOVs are represented by their temporary article OOV id
abs_ids_extend_vocab = data.abstract2ids(abstract_words, vocab, self.article_oovs)
# Overwrite decoder target sequence so it uses the temp article OOV ids
_, self.target = self.get_dec_inp_targ_seqs(abs_ids_extend_vocab, hps.max_dec_steps, start_decoding, stop_decoding)
# Store the original strings
self.original_article = article
self.original_abstract = abstract
self.original_abstract_sents = abstract_sentences
def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
"""Given the reference summary as a sequence of tokens, return the input sequence for the decoder, and the target sequence which we will use to calculate loss. The sequence will be truncated if it is longer than max_len. The input sequence must start with the start_id and the target sequence must end with the stop_id (but not if it's been truncated).
Args:
sequence: List of ids (integers)
max_len: integer
start_id: integer
stop_id: integer
Returns:
inp: sequence length <=max_len starting with start_id
target: sequence same length as input, ending with stop_id only if there was no truncation
"""
inp = [start_id] + sequence[:]
target = sequence[:]
if len(inp) > max_len: # truncate
inp = inp[:max_len]
target = target[:max_len] # no end_token
else: # no truncation
target.append(stop_id) # end token
assert len(inp) == len(target)
return inp, target
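  # A worked example of get_dec_inp_targ_seqs (hypothetical ids, assuming start_id=0 and stop_id=1):
  #   sequence=[4, 5, 6], max_len=5 -> inp=[0, 4, 5, 6], target=[4, 5, 6, 1]  (no truncation, stop token appended)
  #   sequence=[4, 5, 6], max_len=3 -> inp=[0, 4, 5],    target=[4, 5, 6]     (truncated, no stop token)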
def pad_decoder_inp_targ(self, max_len, pad_id):
"""Pad decoder input and target sequences with pad_id up to max_len."""
while len(self.dec_input) < max_len:
self.dec_input.append(pad_id)
while len(self.target) < max_len:
self.target.append(pad_id)
def pad_encoder_input(self, max_len, pad_id):
"""Pad the encoder input sequence with pad_id up to max_len."""
while len(self.enc_input) < max_len:
self.enc_input.append(pad_id)
if self.hps.pointer_gen:
while len(self.enc_input_extend_vocab) < max_len:
self.enc_input_extend_vocab.append(pad_id)
class Batch(object):
"""Class representing a minibatch of train/val/test examples for text summarization."""
def __init__(self, example_list, hps, vocab):
"""Turns the example_list into a Batch object.
Args:
example_list: List of Example objects
hps: hyperparameters
vocab: Vocabulary object
"""
self.pad_id = vocab.word2id(data.PAD_TOKEN) # id of the PAD token used to pad sequences
self.init_encoder_seq(example_list, hps) # initialize the input to the encoder
self.init_decoder_seq(example_list, hps) # initialize the input and targets for the decoder
self.store_orig_strings(example_list) # store the original strings
def init_encoder_seq(self, example_list, hps):
"""Initializes the following:
self.enc_batch:
numpy array of shape (batch_size, <=max_enc_steps) containing integer ids (all OOVs represented by UNK id), padded to length of longest sequence in the batch
self.enc_lens:
numpy array of shape (batch_size) containing integers. The (truncated) length of each encoder input sequence (pre-padding).
self.enc_padding_mask:
numpy array of shape (batch_size, <=max_enc_steps), containing 1s and 0s. 1s correspond to real tokens in enc_batch and target_batch; 0s correspond to padding.
If hps.pointer_gen, additionally initializes the following:
self.max_art_oovs:
maximum number of in-article OOVs in the batch
self.art_oovs:
list of list of in-article OOVs (strings), for each example in the batch
self.enc_batch_extend_vocab:
Same as self.enc_batch, but in-article OOVs are represented by their temporary article OOV number.
"""
# Determine the maximum length of the encoder input sequence in this batch
max_enc_seq_len = max([ex.enc_len for ex in example_list])
# Pad the encoder input sequences up to the length of the longest sequence
for ex in example_list:
ex.pad_encoder_input(max_enc_seq_len, self.pad_id)
# Initialize the numpy arrays
# Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.
self.enc_batch = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)
self.enc_lens = np.zeros((hps.batch_size), dtype=np.int32)
self.enc_padding_mask = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.enc_batch[i, :] = ex.enc_input[:]
self.enc_lens[i] = ex.enc_len
for j in range(ex.enc_len):
self.enc_padding_mask[i][j] = 1
# For pointer-generator mode, need to store some extra info
if hps.pointer_gen:
# Determine the max number of in-article OOVs in this batch
self.max_art_oovs = max([len(ex.article_oovs) for ex in example_list])
# Store the in-article OOVs themselves
self.art_oovs = [ex.article_oovs for ex in example_list]
# Store the version of the enc_batch that uses the article OOV ids
self.enc_batch_extend_vocab = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)
for i, ex in enumerate(example_list):
self.enc_batch_extend_vocab[i, :] = ex.enc_input_extend_vocab[:]
def init_decoder_seq(self, example_list, hps):
"""Initializes the following:
self.dec_batch:
numpy array of shape (batch_size, max_dec_steps), containing integer ids as input for the decoder, padded to max_dec_steps length.
self.target_batch:
numpy array of shape (batch_size, max_dec_steps), containing integer ids for the target sequence, padded to max_dec_steps length.
self.dec_padding_mask:
numpy array of shape (batch_size, max_dec_steps), containing 1s and 0s. 1s correspond to real tokens in dec_batch and target_batch; 0s correspond to padding.
"""
# Pad the inputs and targets
for ex in example_list:
ex.pad_decoder_inp_targ(hps.max_dec_steps, self.pad_id)
# Initialize the numpy arrays.
# Note: our decoder inputs and targets must be the same length for each batch (second dimension = max_dec_steps) because we do not use a dynamic_rnn for decoding. However I believe this is possible, or will soon be possible, with Tensorflow 1.0, in which case it may be best to upgrade to that.
self.dec_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.target_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.dec_padding_mask = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.dec_batch[i, :] = ex.dec_input[:]
self.target_batch[i, :] = ex.target[:]
for j in range(ex.dec_len):
self.dec_padding_mask[i][j] = 1
def store_orig_strings(self, example_list):
"""Store the original article and abstract strings in the Batch object"""
self.original_articles = [ex.original_article for ex in example_list] # list of lists
self.original_abstracts = [ex.original_abstract for ex in example_list] # list of lists
self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list] # list of list of lists
class Batcher(object):
"""A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence."""
BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold
def __init__(self, data_path, vocab, hps, single_pass, decode_after):
"""Initialize the batcher. Start threads that process the data into batches.
Args:
data_path: tf.Example filepattern.
vocab: Vocabulary object
hps: hyperparameters
      single_pass: If True, run through the dataset exactly once (useful for when you want to run evaluation on the dev or test set). Otherwise generate random batches indefinitely (useful for training).
      decode_after: In single_pass mode, the number of already-decoded examples to skip from the start of the dataset (used to resume decoding).
    """
self._data_path = data_path
self._vocab = vocab
self._hps = hps
self._single_pass = single_pass
self._decode_after = decode_after
# Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched
self._batch_queue = queue.Queue(self.BATCH_QUEUE_MAX)
self._example_queue = queue.Queue(self.BATCH_QUEUE_MAX * self._hps.batch_size)
# Different settings depending on whether we're in single_pass mode or not
if single_pass:
self._num_example_q_threads = 1 # just one thread, so we read through the dataset just once
self._num_batch_q_threads = 1 # just one thread to batch examples
self._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing; this essentially means no bucketing
self._finished_reading = False # this will tell us when we're finished reading the dataset
else:
self._num_example_q_threads = FLAGS.example_queue_threads # num threads to fill example queue
self._num_batch_q_threads = FLAGS.batch_queue_threads # num threads to fill batch queue
self._bucketing_cache_size = FLAGS.bucketing_cache_size # how many batches-worth of examples to load into cache before bucketing
# Start the threads that load the queues
self._example_q_threads = []
for _ in range(self._num_example_q_threads):
self._example_q_threads.append(Thread(target=self.fill_example_queue))
self._example_q_threads[-1].daemon = True
self._example_q_threads[-1].start()
self._batch_q_threads = []
for _ in range(self._num_batch_q_threads):
self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
self._batch_q_threads[-1].daemon = True
self._batch_q_threads[-1].start()
# Start a thread that watches the other threads and restarts them if they're dead
if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever
self._watch_thread = Thread(target=self.watch_threads)
self._watch_thread.daemon = True
self._watch_thread.start()
def next_batch(self):
"""Return a Batch from the batch queue.
If mode='decode' then each batch contains a single example repeated beam_size-many times; this is necessary for beam search.
Returns:
batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.
"""
# If the batch queue is empty, print a warning
if self._batch_queue.qsize() == 0:
tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())
if self._single_pass and self._finished_reading:
tf.logging.info("Finished reading dataset in single_pass mode.")
return None
batch = self._batch_queue.get() # get the next Batch
return batch
def fill_example_queue(self):
"""Reads data from file and processes into Examples which are then placed into the example queue."""
input_gen = self.text_generator(data.example_generator(self._data_path, self._single_pass))
while True:
try:
(article, abstract) = input_gen.__next__() # read the next example from file. article and abstract are both strings.
except StopIteration: # if there are no more examples:
tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
if self._single_pass:
tf.logging.info("single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
self._finished_reading = True
break
else:
raise Exception("single_pass mode is off but the example generator is out of data; error.")
abstract_sentences = [sent.strip() for sent in data.abstract2sents(abstract)] # Use the <s> and </s> tags in abstract to get a list of sentences.
example = Example(article, abstract_sentences, self._vocab, self._hps) # Process into an Example.
self._example_queue.put(example) # place the Example in the example queue.
def fill_batch_queue(self):
"""Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue.
In decode mode, makes batches that each contain a single example repeated.
"""
while True:
if self._hps.mode != 'decode':
# Get bucketing_cache_size-many batches of Examples into a list, then sort
inputs = []
for _ in range(self._hps.batch_size * self._bucketing_cache_size):
inputs.append(self._example_queue.get())
inputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence
# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
batches = []
for i in range(0, len(inputs), self._hps.batch_size):
batches.append(inputs[i:i + self._hps.batch_size])
if not self._single_pass:
shuffle(batches)
for b in batches: # each b is a list of Example objects
self._batch_queue.put(Batch(b, self._hps, self._vocab))
else: # beam search decode mode
ex = self._example_queue.get()
b = [ex for _ in range(self._hps.batch_size)]
self._batch_queue.put(Batch(b, self._hps, self._vocab))
def watch_threads(self):
"""Watch example queue and batch queue threads and restart if dead."""
while True:
time.sleep(60)
for idx,t in enumerate(self._example_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found example queue thread dead. Restarting.')
new_t = Thread(target=self.fill_example_queue)
self._example_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
for idx,t in enumerate(self._batch_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found batch queue thread dead. Restarting.')
new_t = Thread(target=self.fill_batch_queue)
self._batch_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
def text_generator(self, example_generator):
"""Generates article and abstract text from tf.Example.
Args:
example_generator: a generator of tf.Examples from file. See data.example_generator"""
cnt = 0
while True:
e = example_generator.__next__() # e is a tf.Example
try:
article_text = e.features.feature['article'].bytes_list.value[0] # the article text was saved under the key 'article' in the data files
abstract_text = e.features.feature['abstract'].bytes_list.value[0] # the abstract text was saved under the key 'abstract' in the data files
except ValueError:
tf.logging.error('Failed to get article or abstract from example')
continue
if len(article_text)==0: # See https://github.com/abisee/pointer-generator/issues/1
tf.logging.warning('Found an example with empty article text. Skipping it.')
else:
if self._single_pass and cnt < self._decode_after: #skip already decoded docs
cnt +=1
continue
yield (article_text, abstract_text)
|
util.py
|
import subprocess
from rc.exception import RunException
import sys
from collections import namedtuple
import os
from typing import Union, List
import io
from threading import Thread
from queue import Queue
from concurrent.futures import ThreadPoolExecutor, as_completed
RunResult = namedtuple('RunResult', ['stdout', 'stderr', 'returncode'])
def convert_list_command_to_str(cmd: List[str]) -> str:
cmd_str = io.StringIO('')
for c in cmd:
if c and (c[0] in ['"', "'", '<', '|', '>', '&', ';', '~']):
cmd_str.write(c)
elif len(c) >= 2 and c[:2] in ['1>', '2>']:
cmd_str.write(c)
else:
cmd_str.write('"' + c + '"')
cmd_str.write(' ')
return cmd_str.getvalue()
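# A small illustration (hypothetical command) of the quoting behaviour above:
#   convert_list_command_to_str(['echo', 'hello world', '>', 'out.txt'])
#   -> '"echo" "hello world" > "out.txt" '
# Arguments are double-quoted unless they start with a shell metacharacter
# (or with the redirection prefixes '1>'/'2>'), which are passed through as-is.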
def run(cmd: Union[str, List[str]], *, shell=['/bin/sh', '-c'], input=None, timeout=None):
p = running(cmd, shell=shell, input=input)
stdout, stderr = p.communicate(timeout=timeout)
return RunResult(returncode=p.returncode, stdout=stdout, stderr=stderr)
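# Usage sketch (assuming a POSIX shell is available):
#   result = run('echo hello')
#   result.returncode  -> 0
#   result.stdout      -> 'hello\n'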
def running(cmd: Union[str, List[str]], *, shell=['/bin/sh', '-c'], input=None):
if type(cmd) is list:
cmd = convert_list_command_to_str(cmd)
try:
p = subprocess.Popen([*shell, cmd], stdin=subprocess.PIPE if input else None,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
if input:
p.stdin.write(input)
p.stdin.close()
p.stdin = None
return p
except:
raise RunException(sys.exc_info()[0])
STDOUT = 1
STDERR = 2
EXIT = 3
def run_stream(cmd: Union[str, List[str]], *, shell=['/bin/sh', '-c'], input=None):
p = running(cmd, shell=shell, input=input)
q = Queue()
def queue_stdout():
for line in p.stdout:
q.put((STDOUT, line))
def queue_stderr():
for line in p.stderr:
q.put((STDERR, line))
queue_stdout_thread = Thread(target=queue_stdout)
queue_stderr_thread = Thread(target=queue_stderr)
queue_stdout_thread.start()
queue_stderr_thread.start()
def queue_exit():
queue_stdout_thread.join()
queue_stderr_thread.join()
        p.wait()  # block until the process exits instead of busy-polling
q.put((EXIT, p.returncode))
queue_exit_thread = Thread(target=queue_exit)
queue_exit_thread.start()
return q, p
def handle_stream(q, *, stdout_handler=None, stderr_handler=None, exit_handler=None):
while True:
event, value = q.get()
if event == EXIT:
if exit_handler:
exit_handler(value)
break
elif event == STDOUT:
if stdout_handler:
stdout_handler(value)
elif event == STDERR:
if stderr_handler:
stderr_handler(value)
q.task_done()
executor = ThreadPoolExecutor()
def go(func, *args, **kwargs):
return executor.submit(func, *args, **kwargs)
def pmap(func, *iterables, timeout=None):
return list(executor.map(func, *iterables, timeout=timeout))
def print_stream(q, *, prefix):
return handle_stream(q, stdout_handler=lambda line: print(prefix, 'STDOUT |', line, end=''),
stderr_handler=lambda line: print(
prefix, 'STDERR |', line, end=''),
exit_handler=lambda exitcode: print(prefix, 'EXIT CODE |', exitcode, end=''))
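# Usage sketch for the streaming helpers (hypothetical command and prefix):
#   q, p = run_stream('echo hello; echo oops 1>&2')
#   print_stream(q, prefix='[demo]')   # prints the STDOUT/STDERR lines and then the exit code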
def save_stream_to_file(q, *, path, name):
with open(os.path.join(path, name+'.stdout'), 'w') as stdout:
with open(os.path.join(path, name+'.stderr'), 'w') as stderr:
with open(os.path.join(path, name+'.exitcode'), 'w') as exitcode:
                return handle_stream(q, stdout_handler=lambda line: stdout.write(line),
                                     stderr_handler=lambda line: stderr.write(line),
                                     exit_handler=lambda ec: exitcode.write(str(ec)))
|
related_metrics.py
|
import logging
from time import time, sleep
from threading import Thread
from multiprocessing import Process
import os
import sys
from os import kill, getpid
import traceback
from ast import literal_eval
import settings
from skyline_functions import get_redis_conn, get_redis_conn_decoded
from functions.metrics.get_metric_latest_anomaly import get_metric_latest_anomaly
from functions.database.queries.get_metric_group_info import get_metric_group_info
from functions.luminosity.get_cross_correlation_relationships import get_cross_correlation_relationships
from functions.luminosity.update_metric_group import update_metric_group
skyline_app = 'luminosity'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
this_host = str(os.uname()[1])
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
try:
LUMINOSITY_RELATED_METRICS_MAX_5MIN_LOADAVG = settings.LUMINOSITY_RELATED_METRICS_MAX_5MIN_LOADAVG
except:
LUMINOSITY_RELATED_METRICS_MAX_5MIN_LOADAVG = 3
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
class RelatedMetrics(Thread):
"""
The RelatedMetrics class controls the luminosity/related_metrics thread and
spawned processes. luminosity/related_metrics analyses the results of
luminosity cross_correlations and related_metricss to create and maintain
metric groups.
"""
def __init__(self, parent_pid):
"""
Initialize RelatedMetrics
"""
super(RelatedMetrics, self).__init__()
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
sys.exit(0)
def find_related(self, i):
"""
- Determine when a metric group was last updated.
- Determine if any new anomalies have occurred on the metric.
- If there are any new anomalies on the metric, determine if new
          cross_correlations or related_metrics have occurred on the metric
        - If there are new cross_correlations or related_metrics, calculate the
          metric group with the new data
        - Determine if the new data modifies the metric group and, if so, update it.
"""
max_execution_seconds = 50
find_related_start = time()
logger.info('related_metrics :: find_related :: process %s started' % str(i))
current_5min_loadavg = os.getloadavg()[1]
if current_5min_loadavg > LUMINOSITY_RELATED_METRICS_MAX_5MIN_LOADAVG:
logger.info('related_metrics :: find_related :: not processing any metrics as current_5min_loadavg: %s, exceeds LUMINOSITY_RELATED_METRICS_MAX_5MIN_LOADAVG: %s' % (
str(current_5min_loadavg),
str(LUMINOSITY_RELATED_METRICS_MAX_5MIN_LOADAVG)))
return
metrics_to_process = []
# If a set exists process that
force_process = False
try:
metrics_to_process = list(self.redis_conn_decoded.smembers('luminosity.related_metrics.process_immediate'))
if metrics_to_process:
force_process = True
logger.info('related_metrics :: find_related :: %s metrics found in luminosity.related_metrics.process_immediate: %s' % (
str(len(metrics_to_process)), str(metrics_to_process)))
self.redis_conn_decoded.delete('luminosity.related_metrics.process_immediate')
except Exception as err:
logger.error(traceback.format_exc())
            logger.error('error :: related_metrics :: failed to get luminosity.related_metrics.process_immediate Redis set - %s' % (
                str(err)))
metrics_to_process = []
# Get all alerting metric basenames
if not metrics_to_process:
try:
metrics_to_process = list(self.redis_conn_decoded.smembers('aet.analyzer.smtp_alerter_metrics'))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: failed get aet.analyzer.smtp_alerter_metrics Redis key - %s' % (
str(err)))
metrics_to_process = []
metrics_to_process_count = len(metrics_to_process)
# Get through the population once per day
optimal_metrics_per_minute = metrics_to_process_count / 1440
# optimal_metrics_per_minute = metrics_to_process_count / 360
if force_process:
optimal_metrics_per_minute = metrics_to_process_count
logger.info('related_metrics :: find_related :: force_process so optimal metrics to process per minute: %s' % str(optimal_metrics_per_minute))
if optimal_metrics_per_minute < 1:
optimal_metrics_per_minute = 1
logger.info('related_metrics :: find_related :: optimal metrics to process per minute: %s' % str(optimal_metrics_per_minute))
metric_names_with_ids = {}
ids_with_metric_names = {}
try:
metric_names_with_ids = self.redis_conn_decoded.hgetall('aet.metrics_manager.metric_names_with_ids')
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: find_related :: failed to get Redis hash aet.metrics_manager.metric_names_with_ids - %s' % str(err))
if metric_names_with_ids:
for c_metric_name in list(metric_names_with_ids.keys()):
c_metric_id = int(str(metric_names_with_ids[c_metric_name]))
ids_with_metric_names[c_metric_id] = c_metric_name
metric_group_last_updated = {}
try:
metric_group_last_updated = self.redis_conn_decoded.hgetall('luminosity.metric_group.last_updated')
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: find_related :: failed to get Redis hash luminosity.metric_group.last_updated - %s' % str(err))
metric_group_last_updated = {}
logger.info('related_metrics :: find_related :: all eligible metric names and ids determined')
latest_anomalies = {}
try:
latest_anomalies = self.redis_conn_decoded.hgetall('panorama.metrics.latest_anomaly')
except Exception as err:
logger.error(traceback.format_exc())
            logger.error('error :: related_metrics :: find_related :: failed to get Redis hash panorama.metrics.latest_anomaly - %s' % str(err))
            latest_anomalies = {}
metrics_checked = []
metric_groups_updated = []
metrics_to_check = []
metrics_skipped_recently_checked = []
metrics_skipped_recent_anomaly = []
metrics_skipped_no_anomaly = []
# Determine which metrics have new anomalies and may require their
# metric_group updated
for base_name in metrics_to_process:
if len(metrics_to_check) >= optimal_metrics_per_minute:
break
# Determine if the metric group needs to be built or checked
metric_id = 0
metric_id_str = None
try:
metric_id_str = metric_names_with_ids[base_name]
if metric_id_str:
metric_id = int(metric_id_str)
except KeyError:
metric_id = 0
if not metric_id:
                logger.error('error :: related_metrics :: find_related :: failed to determine the metric id for %s from Redis hash data aet.metrics_manager.metric_names_with_ids' % str(base_name))
continue
# force_process
if force_process:
logger.info('related_metrics :: find_related :: force_process, %s adding to check' % (
base_name))
metrics_to_check.append([metric_id, base_name])
continue
metric_info_last_updated = 0
try:
metric_info_last_updated_str = None
# Get the entire hash key once
# metric_info_last_updated_str = self.redis_conn_decoded.hget('luminosity.metric_group.last_updated', metric_id)
try:
metric_info_last_updated_str = metric_group_last_updated[str(metric_id)]
except KeyError:
metric_info_last_updated_str = None
if metric_info_last_updated_str:
metric_info_last_updated = int(str(metric_info_last_updated_str))
except Exception as err:
logger.error(traceback.format_exc())
                logger.error('error :: related_metrics :: find_related :: failed to determine last_updated from Redis hash luminosity.metric_group.last_updated - %s' % str(err))
# Check the DB just in case Redis data was lost this will stop all
# metrics being reanalysed if the Redis data is lost
if not metric_info_last_updated:
logger.info('debug :: related_metrics :: find_related :: %s last_updated timestamp not found in Redis hash luminosity.metric_group.last_updated querying DB' % (
base_name))
metric_group_info = {}
try:
metric_group_info = get_metric_group_info(skyline_app, metric_id)
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: find_related :: get_metric_group_info failed - %s' % str(err))
try:
metric_info_last_updated = metric_group_info[metric_id]['last_updated']
except KeyError:
metric_info_last_updated = 0
# Never checked
if not metric_info_last_updated:
logger.info('related_metrics :: find_related :: %s has no last_updated timestamp adding to check' % (
base_name))
metrics_to_check.append([metric_id, base_name])
continue
# if metric_info_last_updated > (int(find_related_start) - 86400):
if metric_info_last_updated > (int(find_related_start) - 14400):
metrics_skipped_recently_checked.append(base_name)
continue
metric_last_anomaly_ts = 0
latest_anomaly = {}
try:
latest_anomaly_str = latest_anomalies[base_name]
if latest_anomaly_str:
latest_anomaly = literal_eval(str(latest_anomaly_str))
if latest_anomaly:
metric_last_anomaly_ts = latest_anomaly['anomaly_timestamp']
except KeyError:
metric_last_anomaly_ts = 0
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: find_related :: failed literal_eval latest_anomaly - %s' % str(err))
metric_last_anomaly_ts = 0
if not metric_last_anomaly_ts:
try:
# TODO optimise anomalies DB requests to be Redis instead
# params = {'latest': True}
# latest_metric_anomaly = get_anomalies(skyline_app, metric_id, params)
# Causes a lot of DB queries for metrics without anomalies
# latest_anomaly = get_metric_latest_anomaly(skyline_app, base_name, metric_id, False)
latest_anomaly = {}
if latest_anomaly:
try:
metric_last_anomaly_ts = latest_anomaly['anomaly_timestamp']
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: find_related :: failed to determine anomaly_timestamp for get_anomalies dict - %s' % str(err))
metric_last_anomaly_ts = 0
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: find_related :: get_anomalies failed - %s' % str(err))
metric_last_anomaly_ts = 0
if not metric_last_anomaly_ts:
metrics_skipped_no_anomaly.append(base_name)
try:
self.redis_conn_decoded.hset('luminosity.metric_group.last_updated', metric_id, int(find_related_start))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: find_related :: failed to update in metric_id: %s luminosity.metric_group.last_updated - %s' % (
str(metric_id), str(err)))
continue
# Allow correlations to occur
if metric_last_anomaly_ts > (int(find_related_start) - 900):
metrics_skipped_recent_anomaly.append(base_name)
continue
if metric_last_anomaly_ts > metric_info_last_updated:
logger.info('related_metrics :: find_related :: recent anomaly on %s adding to check' % base_name)
metrics_to_check.append([metric_id, base_name])
logger.info('related_metrics :: find_related :: %s metrics in metrics_to_check' % str(len(metrics_to_check)))
logger.info('related_metrics :: find_related :: metrics_to_check: %s' % str(metrics_to_check))
for metric_id, base_name in metrics_to_check:
time_now = int(time())
running_for = time_now - int(find_related_start)
if running_for > max_execution_seconds:
logger.info('related_metrics :: find_related :: stopping after running for %s seconds, reaching max_execution_seconds' % str(running_for))
break
metrics_checked.append(base_name)
cross_correlation_relationships = {}
try:
cross_correlation_relationships_dict = get_cross_correlation_relationships(base_name, metric_names_with_ids=metric_names_with_ids)
if cross_correlation_relationships_dict:
cross_correlation_relationships = cross_correlation_relationships_dict[base_name]['cross_correlation_relationships']
logger.info('related_metrics :: find_related :: %s cross_correlation_relationships for %s' % (
str(len(list(cross_correlation_relationships.keys()))), base_name))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: find_related :: get_cross_correlation_relationships failed for %s - %s' % (
base_name, str(err)))
try:
self.redis_conn_decoded.hset('luminosity.metric_group.last_updated', metric_id, int(find_related_start))
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: find_related :: failed to update in metric_id: %s luminosity.metric_group.last_updated - %s' % (
str(metric_id), str(err)))
metric_group_update = False
if cross_correlation_relationships:
metric_group_update = True
if metric_group_info:
if metric_group_info['related_metrics'] > 0:
metric_group_update = True
if force_process:
metric_group_update = True
if metric_group_update:
updated_metric_group = update_metric_group(base_name, metric_id, cross_correlation_relationships, ids_with_metric_names)
if updated_metric_group:
                    logger.info('related_metrics :: find_related :: updated metric group for %s, updated_metric_group: %s' % (
                        base_name, str(updated_metric_group)))
metric_groups_updated.append(base_name)
# metrics_skipped_recently_checked.append(base_name)
logger.info('related_metrics :: find_related :: %s metrics skipped as recently checked' % (
str(len(metrics_skipped_recently_checked))))
        logger.info('related_metrics :: find_related :: %s metrics skipped as they have a recent anomaly' % (
            str(len(metrics_skipped_recent_anomaly))))
logger.info('related_metrics :: find_related :: %s metrics skipped as they have no anomalies' % (
str(len(metrics_skipped_no_anomaly))))
find_related_end = time() - find_related_start
        logger.info('related_metrics :: find_related :: %s metrics checked and %s metric_groups were updated, analysis took %.2f seconds' % (
str(len(metrics_checked)), str(len(metric_groups_updated)),
find_related_end))
# related_metrics table
# id, source_metric_id, timestamp, full_duration, resolution, processed
# related_metricss table
# id, related_metrics_id, related_metric_id, ppscore_1, ppscore_2
        # Maybe do not just use ppscore, maybe use ruptures to identify metrics
        # that have changepoints in the same window
return
def run(self):
"""
        - Called when the process initializes.
        - Determine if Redis is up
        - Spawn a find_related process to do analysis
        - Wait for the process to finish.
        - Run every 60 seconds
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('related_metrics :: starting')
while 1:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics cannot connect to redis at socket path %s - %s' % (
settings.REDIS_SOCKET_PATH, e))
sleep(10)
try:
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
except Exception as e:
logger.info(traceback.format_exc())
logger.error('error :: related_metrics cannot connect to get_redis_conn - %s' % e)
continue
# Report app up
try:
self.redis_conn.setex('luminosity.related_metrics', 120, now)
logger.info('related_metrics :: set luminosity.related_metrics Redis key')
except Exception as err:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: could not update the Redis luminosity.related_metrics key - %s' % str(err))
now_timestamp = int(time())
# Spawn process
pids = []
spawned_pids = []
pid_count = 0
for i in range(1, 1 + 1):
try:
p = Process(target=self.find_related, args=(i,))
pids.append(p)
pid_count += 1
logger.info('related_metrics starting %s of 1 find_related processes' % (str(pid_count)))
p.start()
spawned_pids.append(p.pid)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: failed to spawn find_related_metrics process - %s' % e)
# Self monitor processes and terminate if any find_related
# has run for longer than run_every - 10
p_starts = time()
while time() - p_starts <= (120 - 10):
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info('related_metrics :: find_related process completed in %.2f seconds' % (
time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('related_metrics :: timed out, killing find_related process')
for p in pids:
logger.info('related_metrics :: killing find_related process')
p.terminate()
logger.info('related_metrics :: killed find_related process')
for p in pids:
if p.is_alive():
try:
logger.info('related_metrics :: stopping find_related - %s' % (str(p.is_alive())))
p.terminate()
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: related_metrics :: failed to stop find_related - %s' % e)
run_every = 60
process_runtime = time() - now
if process_runtime < run_every:
sleep_for = (run_every - process_runtime)
process_runtime_now = time() - now
sleep_for = (run_every - process_runtime_now)
logger.info('related_metrics :: sleeping for %.2f seconds due to low run time...' % sleep_for)
sleep(sleep_for)
try:
del sleep_for
except Exception as e:
logger.error('error :: related_metrics :: failed to del sleep_for - %s' % e)
try:
del process_runtime
except Exception as e:
logger.error('error :: related_metrics :: failed to del process_runtime - %s' % e)
|
randoscript.py
|
from collections import OrderedDict
import sys
from ssrando import Randomizer, VERSION
from options import OPTIONS, Options
def process_command_line_options(options):
if 'help' in options:
print('Skyward Sword Randomizer Version '+VERSION)
print('Available command line options:\n')
longest_option = max(len(option_name) for option_name in OPTIONS.keys())
for option_name, option in OPTIONS.items():
print(' --'+option_name.ljust(longest_option) + ' ' + option['help'])
# permalink
print()
        print(' --' + 'permalink'.ljust(longest_option) + ' ' + 'Specify a permalink, which includes the settings. This is set first; other options may override these settings')
# bulk options
print()
print(' --' + 'bulk'.ljust(longest_option) + ' ' + 'Runs the randomizer in bulk mode, to generate lots of spoiler logs. Implies --dry-run')
print(' --' + 'low'.ljust(longest_option) + ' ' + '(bulk mode only) specify the lower end of seeds to generate (inclusive, default: 1)')
print(' --' + 'high'.ljust(longest_option) + ' ' + '(bulk mode only) specify the higher end of seeds to generate (inclusive, default: 100)')
print(' --' + 'threads'.ljust(longest_option) + ' ' + '(bulk mode only) specify the number of threads to use (default: 1)')
return None
elif 'version' in options:
print(VERSION)
return None
else:
cleaned_options = Options()
if 'permalink' in options:
cleaned_options.update_from_permalink(options.pop('permalink'))
problems = cleaned_options.update_from_cmd_args(options)
if problems:
print('ERROR: invalid options:')
for problem in problems:
print(problem)
return cleaned_options
def get_ranges(start, end, parts):
step = (end+1-start) / parts
for i in range(parts):
yield (int(start + step * i), int(start + step * (i+1)))
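# Rough illustration of get_ranges (half-open ranges consumed by range(start, end) below):
#   list(get_ranges(1, 100, 3)) -> approximately [(1, 34), (34, 67), (67, 101)],
#   which together cover seeds 1..100 inclusive.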
if 'NOGIT' in VERSION:
print('WARNING: Running from source, but without git, this is highly discouraged')
# use command line parameters
cmd_line_args = OrderedDict()
for arg in sys.argv[1:]:
arg_parts = arg.split("=", 1)
option_name = arg_parts[0]
assert option_name.startswith('--')
if len(arg_parts) == 1:
cmd_line_args[option_name[2:]] = 'true'
else:
cmd_line_args[option_name[2:]] = arg_parts[1]
bulk_mode = False
if cmd_line_args.pop('bulk', False):
bulk_mode = True
bulk_low = int(cmd_line_args.pop('low', '1'))
bulk_high = int(cmd_line_args.pop('high', '100'))
if bulk_high < bulk_low:
print('high has to be higher than low!')
exit(1)
bulk_threads = int(cmd_line_args.pop('threads', '1'))
options = process_command_line_options(cmd_line_args)
if options is not None:
if bulk_mode:
from multiprocessing import Process
options.set_option('dry-run', True)
def randothread(start, end, local_opts):
for i in range(start, end):
local_opts.set_option('seed', i)
rando = Randomizer(local_opts)
rando.randomize()
threads = []
for (start, end) in get_ranges(bulk_low, bulk_high, bulk_threads):
thread = Process(target=randothread, args=(start, end, options.copy()))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
elif options['noui']:
rando = Randomizer(options)
if not options['dry-run']:
rando.check_valid_directory_setup()
total_progress_steps = rando.get_total_progress_steps()
progress_steps=0
def progress_callback(action):
global progress_steps
print(f'{action} {progress_steps}/{total_progress_steps}')
progress_steps+=1
rando.progress_callback = progress_callback
rando.randomize()
print(f'SEED HASH: {rando.randomizer_hash}')
else:
from gui.randogui import run_main_gui
run_main_gui(options)
|
__init__.py
|
# pylint: disable=too-many-lines
# (Yes, it has a point!)
from __future__ import division, absolute_import, print_function
__copyright__ = "Copyright (C) 2009-2013 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import operator
import sys
import logging
from functools import reduce
import six
from six.moves import range, zip, intern, input
decorator_module = __import__("decorator", level=0)
my_decorator = decorator_module.decorator
__doc__ = """
A Collection of Utilities
=========================
Math
----
.. autofunction:: levi_civita
.. autofunction:: perm
.. autofunction:: comb
Assertive accessors
-------------------
.. autofunction:: one
.. autofunction:: is_single_valued
.. autofunction:: all_roughly_equal
.. autofunction:: single_valued
Memoization
-----------
.. autofunction:: memoize
.. autofunction:: memoize_on_first_arg
.. autofunction:: memoize_method
.. autofunction:: memoize_method_with_uncached
.. autofunction:: memoize_in
Argmin/max
----------
.. autofunction:: argmin2
.. autofunction:: argmax2
.. autofunction:: argmin
.. autofunction:: argmax
Cartesian products
------------------
.. autofunction:: cartesian_product
.. autofunction:: distinct_pairs
Permutations, Tuples, Integer sequences
---------------------------------------
.. autofunction:: wandering_element
.. autofunction:: indices_in_shape
.. autofunction:: generate_nonnegative_integer_tuples_below
.. autofunction:: generate_nonnegative_integer_tuples_summing_to_at_most
.. autofunction:: generate_all_nonnegative_integer_tuples
.. autofunction:: generate_all_integer_tuples_below
.. autofunction:: generate_all_integer_tuples
.. autofunction:: generate_permutations
.. autofunction:: generate_unique_permutations
Graph Algorithms
----------------
.. autofunction:: a_star
Formatting
----------
.. autoclass:: Table
.. autofunction:: string_histogram
.. autofunction:: word_wrap
Debugging
---------
.. autofunction:: typedump
.. autofunction:: invoke_editor
Progress bars
-------------
.. autoclass:: ProgressBar
Name generation
---------------
.. autofunction:: generate_unique_names
.. autofunction:: generate_numbered_unique_names
.. autoclass:: UniqueNameGenerator
Functions for dealing with (large) auxiliary files
--------------------------------------------------
.. autofunction:: download_from_web_if_not_present
Helpers for :mod:`numpy`
------------------------
.. autofunction:: reshaped_view
Timing data
-----------
.. data:: SUPPORTS_PROCESS_TIME
A :class:`bool` indicating whether :class:`ProcessTimer` measures elapsed
process time (available on Python 3.3+).
.. autoclass:: ProcessTimer
Log utilities
-------------
.. autoclass:: ProcessLogger
.. autoclass:: DebugProcessLogger
.. autoclass:: log_process
"""
# {{{ math --------------------------------------------------------------------
def delta(x, y):
if x == y:
return 1
else:
return 0
def levi_civita(tup):
"""Compute an entry of the Levi-Civita tensor for the indices *tuple*."""
if len(tup) == 2:
i, j = tup
return j-i
if len(tup) == 3:
i, j, k = tup
return (j-i)*(k-i)*(k-j)/2
else:
raise NotImplementedError
def factorial(n):
from operator import mul
assert n == int(n)
return reduce(mul, (i for i in range(1, n+1)), 1)
def perm(n, k):
"""Return P(n, k), the number of permutations of length k drawn from n
choices.
"""
result = 1
assert k > 0
while k:
result *= n
n -= 1
k -= 1
return result
def comb(n, k):
"""Return C(n, k), the number of combinations (subsets)
of length k drawn from n choices.
"""
return perm(n, k)//factorial(k)
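# Quick sanity check for the two helpers above (values easy to verify by hand):
#   perm(5, 2) == 5*4 == 20
#   comb(5, 2) == perm(5, 2)//factorial(2) == 10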
def norm_1(iterable):
return sum(abs(x) for x in iterable)
def norm_2(iterable):
return sum(x**2 for x in iterable)**0.5
def norm_inf(iterable):
return max(abs(x) for x in iterable)
def norm_p(iterable, p):
return sum(i**p for i in iterable)**(1/p)
class Norm(object):
def __init__(self, p):
self.p = p
def __call__(self, iterable):
return sum(i**self.p for i in iterable)**(1/self.p)
# }}}
# {{{ data structures
# {{{ record
class RecordWithoutPickling(object):
"""An aggregate of named sub-variables. Assumes that each record sub-type
will be individually derived from this class.
"""
__slots__ = []
def __init__(self, valuedict=None, exclude=None, **kwargs):
assert self.__class__ is not Record
if exclude is None:
exclude = ["self"]
try:
fields = self.__class__.fields
except AttributeError:
self.__class__.fields = fields = set()
if valuedict is not None:
kwargs.update(valuedict)
for key, value in six.iteritems(kwargs):
if key not in exclude:
fields.add(key)
setattr(self, key, value)
def get_copy_kwargs(self, **kwargs):
for f in self.__class__.fields:
if f not in kwargs:
try:
kwargs[f] = getattr(self, f)
except AttributeError:
pass
return kwargs
def copy(self, **kwargs):
return self.__class__(**self.get_copy_kwargs(**kwargs))
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
", ".join("%s=%r" % (fld, getattr(self, fld))
for fld in self.__class__.fields
if hasattr(self, fld)))
def register_fields(self, new_fields):
try:
fields = self.__class__.fields
except AttributeError:
self.__class__.fields = fields = set()
fields.update(new_fields)
def __getattr__(self, name):
# This method is implemented to avoid pylint 'no-member' errors for
# attribute access.
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, name))
class Record(RecordWithoutPickling):
__slots__ = []
def __getstate__(self):
return dict(
(key, getattr(self, key))
for key in self.__class__.fields
if hasattr(self, key))
def __setstate__(self, valuedict):
try:
fields = self.__class__.fields
except AttributeError:
self.__class__.fields = fields = set()
for key, value in six.iteritems(valuedict):
fields.add(key)
setattr(self, key, value)
def __eq__(self, other):
return (self.__class__ == other.__class__
and self.__getstate__() == other.__getstate__())
def __ne__(self, other):
return not self.__eq__(other)
class ImmutableRecordWithoutPickling(RecordWithoutPickling):
"Hashable record. Does not explicitly enforce immutability."
def __init__(self, *args, **kwargs):
RecordWithoutPickling.__init__(self, *args, **kwargs)
self._cached_hash = None
def __hash__(self):
if self._cached_hash is None:
self._cached_hash = hash(
(type(self),) + tuple(getattr(self, field)
for field in self.__class__.fields))
return self._cached_hash
class ImmutableRecord(ImmutableRecordWithoutPickling, Record):
pass
# }}}
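# Illustrative sketch added for exposition (not part of the original module);
# the Sample subclass and its field names are hypothetical.
def _example_record_usage():
    class Sample(Record):
        pass
    r = Sample(width=3, height=4)
    assert (r.width, r.height) == (3, 4)
    r2 = r.copy(height=5)            # rebuild with a selective override
    assert (r2.width, r2.height) == (3, 5)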
class Reference(object):
def __init__(self, value):
self.value = value
def get(self):
from warnings import warn
warn("Reference.get() is deprecated -- use ref.value instead")
return self.value
def set(self, value):
self.value = value
class FakeList(object):
def __init__(self, f, length):
self._Length = length
self._Function = f
def __len__(self):
return self._Length
def __getitem__(self, index):
try:
return [self._Function(i)
for i in range(*index.indices(self._Length))]
except AttributeError:
return self._Function(index)
# {{{ dependent dictionary ----------------------------------------------------
class DependentDictionary(object):
def __init__(self, f, start=None):
if start is None:
start = {}
self._Function = f
self._Dictionary = start.copy()
def copy(self):
return DependentDictionary(self._Function, self._Dictionary)
def __contains__(self, key):
try:
self[key] # pylint: disable=pointless-statement
return True
except KeyError:
return False
def __getitem__(self, key):
try:
return self._Dictionary[key]
except KeyError:
return self._Function(self._Dictionary, key)
def __setitem__(self, key, value):
self._Dictionary[key] = value
def genuineKeys(self): # noqa
return list(self._Dictionary.keys())
def iteritems(self):
return six.iteritems(self._Dictionary)
def iterkeys(self):
return six.iterkeys(self._Dictionary)
def itervalues(self):
return six.itervalues(self._Dictionary)
# }}}
# }}}
# {{{ assertive accessors
def one(iterable):
"""Return the first entry of *iterable*. Assert that *iterable* has only
that one entry.
"""
it = iter(iterable)
try:
v = next(it)
except StopIteration:
raise ValueError("empty iterable passed to 'one()'")
def no_more():
try:
next(it)
raise ValueError("iterable with more than one entry passed to 'one()'")
except StopIteration:
return True
assert no_more()
return v
def is_single_valued(iterable, equality_pred=operator.eq):
it = iter(iterable)
try:
first_item = next(it)
except StopIteration:
raise ValueError("empty iterable passed to 'single_valued()'")
for other_item in it:
if not equality_pred(other_item, first_item):
return False
return True
all_equal = is_single_valued
def all_roughly_equal(iterable, threshold):
return is_single_valued(iterable,
equality_pred=lambda a, b: abs(a-b) < threshold)
def single_valued(iterable, equality_pred=operator.eq):
"""Return the first entry of *iterable*; Assert that other entries
are the same with the first entry of *iterable*.
"""
it = iter(iterable)
try:
first_item = next(it)
except StopIteration:
raise ValueError("empty iterable passed to 'single_valued()'")
def others_same():
for other_item in it:
if not equality_pred(other_item, first_item):
return False
return True
assert others_same()
return first_item
# }}}
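# Illustrative sketch added for exposition (not part of the original module):
# the assertive accessors either return a value or fail loudly.
def _example_assertive_accessors():
    assert one([42]) == 42
    assert is_single_valued([7, 7, 7])
    assert single_valued(iter([3, 3])) == 3
    assert all_roughly_equal([1.0, 1.0000001], threshold=1e-3)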
# {{{ memoization / attribute storage
def memoize(*args, **kwargs):
"""Stores previously computed function values in a cache.
Two keyword-only arguments are supported:
:arg use_kwargs: Allows the caller to use keyword arguments. Defaults to
``False``. Setting this to ``True`` has a non-negligible performance
impact.
:arg key: A function receiving the same arguments as the decorated function
which computes and returns the cache key.
"""
use_kw = bool(kwargs.pop('use_kwargs', False))
if use_kw:
def default_key_func(*inner_args, **inner_kwargs):
return inner_args, frozenset(six.iteritems(inner_kwargs))
else:
default_key_func = None
key_func = kwargs.pop("key", default_key_func)
if kwargs:
raise TypeError(
"memoize received unexpected keyword arguments: %s"
% ", ".join(list(kwargs.keys())))
if key_func is not None:
@my_decorator
def _deco(func, *args, **kwargs):
# by Michele Simionato
# http://www.phyast.pitt.edu/~micheles/python/
key = key_func(*args, **kwargs)
try:
return func._memoize_dic[key] # pylint: disable=protected-access
except AttributeError:
# _memoize_dic doesn't exist yet.
result = func(*args, **kwargs)
func._memoize_dic = {key: result} # pylint: disable=protected-access
return result
except KeyError:
result = func(*args, **kwargs)
func._memoize_dic[key] = result # pylint: disable=protected-access
return result
else:
@my_decorator
def _deco(func, *args):
# by Michele Simionato
# http://www.phyast.pitt.edu/~micheles/python/
try:
return func._memoize_dic[args] # pylint: disable=protected-access
except AttributeError:
# _memoize_dic doesn't exist yet.
result = func(*args)
func._memoize_dic = {args: result} # pylint:disable=protected-access
return result
except KeyError:
result = func(*args)
func._memoize_dic[args] = result # pylint: disable=protected-access
return result
if not args:
return _deco
if callable(args[0]) and len(args) == 1:
return _deco(args[0])
raise TypeError(
"memoize received unexpected position arguments: %s" % args)
FunctionValueCache = memoize
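# Illustrative sketch added for exposition (not part of the original module);
# _example_fib is a hypothetical function used to show @memoize caching
# recursive calls on their positional arguments.
@memoize
def _example_fib(n):
    return n if n < 2 else _example_fib(n - 1) + _example_fib(n - 2)
def _example_memoize_usage():
    assert _example_fib(20) == 6765
    assert _example_fib(20) == 6765  # second call is answered from the cache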
class _HasKwargs(object):
pass
def memoize_on_first_arg(function, cache_dict_name=None):
"""Like :func:`memoize_method`, but for functions that take the object
to do memoization as first argument.
Supports cache deletion via ``function_name.clear_cache(self)``.
.. note::
*clear_cache* support requires Python 2.5 or newer.
"""
if cache_dict_name is None:
cache_dict_name = intern("_memoize_dic_"
+ function.__module__ + function.__name__)
def wrapper(obj, *args, **kwargs):
if kwargs:
key = (_HasKwargs, frozenset(six.iteritems(kwargs))) + args
else:
key = args
try:
return getattr(obj, cache_dict_name)[key]
except AttributeError:
result = function(obj, *args, **kwargs)
setattr(obj, cache_dict_name, {key: result})
return result
except KeyError:
result = function(obj, *args, **kwargs)
getattr(obj, cache_dict_name)[key] = result
return result
def clear_cache(obj):
delattr(obj, cache_dict_name)
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, function)
new_wrapper.clear_cache = clear_cache
return new_wrapper
def memoize_method(method):
"""Supports cache deletion via ``method_name.clear_cache(self)``.
.. note::
*clear_cache* support requires Python 2.5 or newer.
"""
return memoize_on_first_arg(method, intern("_memoize_dic_"+method.__name__))
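# Illustrative sketch added for exposition (not part of the original module);
# _ExampleCounter is hypothetical and shows per-instance caching plus
# explicit cache clearing via <method>.clear_cache(obj).
class _ExampleCounter(object):
    def __init__(self):
        self.calls = 0
    @memoize_method
    def double(self, x):
        self.calls += 1
        return 2 * x
def _example_memoize_method_usage():
    c = _ExampleCounter()
    assert c.double(3) == 6 and c.double(3) == 6
    assert c.calls == 1              # the second call hit the cache
    _ExampleCounter.double.clear_cache(c)
    assert c.double(3) == 6 and c.calls == 2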
def memoize_method_with_uncached(uncached_args=None, uncached_kwargs=None):
"""Supports cache deletion via ``method_name.clear_cache(self)``.
:arg uncached_args: a list of argument numbers
(0-based, not counting 'self' argument)
"""
if uncached_args is None:
uncached_args = []
if uncached_kwargs is None:
uncached_kwargs = set()
# delete starting from the end
uncached_args = sorted(uncached_args, reverse=True)
uncached_kwargs = list(uncached_kwargs)
def parametrized_decorator(method):
cache_dict_name = intern("_memoize_dic_"+method.__name__)
def wrapper(self, *args, **kwargs):
cache_args = list(args)
cache_kwargs = kwargs.copy()
for i in uncached_args:
if i < len(cache_args):
cache_args.pop(i)
cache_args = tuple(cache_args)
if kwargs:
for name in uncached_kwargs:
cache_kwargs.pop(name, None)
key = (
(_HasKwargs, frozenset(six.iteritems(cache_kwargs)))
+ cache_args)
else:
key = cache_args
try:
return getattr(self, cache_dict_name)[key]
except AttributeError:
result = method(self, *args, **kwargs)
setattr(self, cache_dict_name, {key: result})
return result
except KeyError:
result = method(self, *args, **kwargs)
getattr(self, cache_dict_name)[key] = result
return result
def clear_cache(self):
delattr(self, cache_dict_name)
if sys.version_info >= (2, 5):
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, method)
new_wrapper.clear_cache = clear_cache
return new_wrapper
return parametrized_decorator
def memoize_method_nested(inner):
"""Adds a cache to a function nested inside a method. The cache is attached
to *memoize_cache_context* (if it exists) or *self* in the outer (method)
namespace.
Requires Python 2.5 or newer.
"""
from warnings import warn
warn("memoize_method_nested is deprecated. Use @memoize_in(self, 'identifier') "
"instead", DeprecationWarning, stacklevel=2)
from functools import wraps
cache_dict_name = intern("_memoize_inner_dic_%s_%s_%d"
% (inner.__name__, inner.__code__.co_filename,
inner.__code__.co_firstlineno))
from inspect import currentframe
outer_frame = currentframe().f_back
cache_context = outer_frame.f_locals.get("memoize_cache_context")
if cache_context is None:
cache_context = outer_frame.f_locals.get("self")
try:
cache_dict = getattr(cache_context, cache_dict_name)
except AttributeError:
cache_dict = {}
setattr(cache_context, cache_dict_name, cache_dict)
@wraps(inner)
def new_inner(*args):
try:
return cache_dict[args]
except KeyError:
result = inner(*args)
cache_dict[args] = result
return result
return new_inner
class memoize_in(object): # noqa
"""Adds a cache to a function nested inside a method. The cache is attached
to *object*.
Requires Python 2.5 or newer.
"""
def __init__(self, container, identifier):
key = "_pytools_memoize_in_dict_for_"+identifier
try:
self.cache_dict = getattr(container, key)
except AttributeError:
self.cache_dict = {}
setattr(container, key, self.cache_dict)
def __call__(self, inner):
from functools import wraps
@wraps(inner)
def new_inner(*args):
try:
return self.cache_dict[args]
except KeyError:
result = inner(*args)
self.cache_dict[args] = result
return result
return new_inner
# }}}
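# Illustrative sketch added for exposition (not part of the original module);
# _ExampleSolver and the "example_kernel" identifier are hypothetical.
class _ExampleSolver(object):
    def solve(self, x):
        @memoize_in(self, "example_kernel")
        def kernel(y):
            return y ** 2
        return kernel(x) + kernel(x)  # the second call is cached on *self*
def _example_memoize_in_usage():
    assert _ExampleSolver().solve(3) == 18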
# {{{ syntactical sugar
class InfixOperator:
"""Pseudo-infix operators that allow syntax of the kind `op1 <<operator>> op2'.
Following a recipe from
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/384122
"""
def __init__(self, function):
self.function = function
def __rlshift__(self, other):
return InfixOperator(lambda x: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def call(self, a, b):
return self.function(a, b)
def monkeypatch_method(cls):
# from GvR, http://mail.python.org/pipermail/python-dev/2008-January/076194.html
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
def monkeypatch_class(_name, bases, namespace):
# from GvR, http://mail.python.org/pipermail/python-dev/2008-January/076194.html
assert len(bases) == 1, "Exactly one base class required"
base = bases[0]
for name, value in six.iteritems(namespace):
if name != "__metaclass__":
setattr(base, name, value)
return base
# }}}
# {{{ generic utilities
def add_tuples(t1, t2):
return tuple([t1v + t2v for t1v, t2v in zip(t1, t2)])
def negate_tuple(t1):
return tuple([-t1v for t1v in t1])
def shift(vec, dist):
"""Return a copy of C{vec} shifted by C{dist}.
@postcondition: C{shift(a, i)[j] == a[(i+j) % len(a)]}
"""
result = vec[:]
N = len(vec) # noqa
dist = dist % N
# modulo only returns positive distances!
if dist > 0:
result[dist:] = vec[:N-dist]
result[:dist] = vec[N-dist:]
return result
def len_iterable(iterable):
return sum(1 for i in iterable)
def flatten(iterable):
"""For an iterable of sub-iterables, generate each member of each
sub-iterable in turn, i.e. a flattened version of that super-iterable.
Example: Turn [[a,b,c],[d,e,f]] into [a,b,c,d,e,f].
"""
for sublist in iterable:
for j in sublist:
yield j
def general_sum(sequence):
return reduce(operator.add, sequence)
def linear_combination(coefficients, vectors):
result = coefficients[0] * vectors[0]
for c, v in zip(coefficients[1:], vectors[1:]):
result += c*v
return result
def common_prefix(iterable, empty=None):
it = iter(iterable)
try:
pfx = next(it)
except StopIteration:
return empty
for v in it:
for j, pfx_j in enumerate(pfx):
if pfx_j != v[j]:
pfx = pfx[:j]
if j == 0:
return pfx
break
return pfx
def decorate(function, iterable):
return [(x, function(x)) for x in iterable]
def partition(criterion, iterable):
part_true = []
part_false = []
for i in iterable:
if criterion(i):
part_true.append(i)
else:
part_false.append(i)
return part_true, part_false
def partition2(iterable):
part_true = []
part_false = []
for pred, i in iterable:
if pred:
part_true.append(i)
else:
part_false.append(i)
return part_true, part_false
def product(iterable):
from operator import mul
return reduce(mul, iterable, 1)
all = six.moves.builtins.all # pylint: disable=redefined-builtin
any = six.moves.builtins.any # pylint: disable=redefined-builtin
def reverse_dictionary(the_dict):
result = {}
for key, value in six.iteritems(the_dict):
if value in result:
raise RuntimeError(
"non-reversible mapping, duplicate key '%s'" % value)
result[value] = key
return result
def set_sum(set_iterable):
from operator import or_
return reduce(or_, set_iterable, set())
def div_ceil(nr, dr):
return -(-nr // dr)
def uniform_interval_splitting(n, granularity, max_intervals):
""" Return *(interval_size, num_intervals)* such that::
num_intervals * interval_size >= n
and::
(num_intervals - 1) * interval_size < n
and *interval_size* is a multiple of *granularity*.
"""
# ported from Thrust
grains = div_ceil(n, granularity)
# one grain per interval
if grains <= max_intervals:
return granularity, grains
grains_per_interval = div_ceil(grains, max_intervals)
interval_size = grains_per_interval * granularity
num_intervals = div_ceil(n, interval_size)
return interval_size, num_intervals
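# Illustrative sketch added for exposition (not part of the original module);
# the work-size numbers below are hypothetical.
def _example_uniform_interval_splitting():
    interval_size, num_intervals = uniform_interval_splitting(
            1000, granularity=32, max_intervals=4)
    assert interval_size % 32 == 0
    assert num_intervals * interval_size >= 1000
    assert (num_intervals - 1) * interval_size < 1000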
def find_max_where(predicate, prec=1e-5, initial_guess=1, fail_bound=1e38):
"""Find the largest value for which a predicate is true,
along a half-line. 0 is assumed to be the lower bound."""
# {{{ establish bracket
mag = initial_guess
if predicate(mag):
mag *= 2
while predicate(mag):
mag *= 2
if mag > fail_bound:
raise RuntimeError("predicate appears to be true "
"everywhere, up to %g" % fail_bound)
lower_true = mag/2
upper_false = mag
else:
mag /= 2
while not predicate(mag):
mag /= 2
if mag < prec:
return mag
lower_true = mag
upper_false = mag*2
# }}}
# {{{ refine
# Refine a bracket between *lower_true*, where the predicate is true,
# and *upper_false*, where it is false, until *prec* is satisfied.
assert predicate(lower_true)
assert not predicate(upper_false)
while abs(lower_true-upper_false) > prec:
mid = (lower_true+upper_false)/2
if predicate(mid):
lower_true = mid
else:
upper_false = mid
return lower_true
# }}}
# }}}
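# Illustrative sketch added for exposition (not part of the original module);
# the threshold value is hypothetical. find_max_where brackets and bisects a
# monotone predicate along the half-line starting at 0.
def _example_find_max_where():
    threshold = 7.3
    result = find_max_where(lambda x: x <= threshold, prec=1e-4)
    assert abs(result - threshold) < 1e-3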
# {{{ argmin, argmax
def argmin2(iterable, return_value=False):
it = iter(iterable)
try:
current_argmin, current_min = next(it)
except StopIteration:
raise ValueError("argmin of empty iterable")
for arg, item in it:
if item < current_min:
current_argmin = arg
current_min = item
if return_value:
return current_argmin, current_min
else:
return current_argmin
def argmax2(iterable, return_value=False):
it = iter(iterable)
try:
current_argmax, current_max = next(it)
except StopIteration:
raise ValueError("argmax of empty iterable")
for arg, item in it:
if item > current_max:
current_argmax = arg
current_max = item
if return_value:
return current_argmax, current_max
else:
return current_argmax
def argmin(iterable):
return argmin2(enumerate(iterable))
def argmax(iterable):
return argmax2(enumerate(iterable))
# }}}
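# Illustrative sketch added for exposition (not part of the original module):
# argmin/argmax return positions, the *2 variants work on (key, value) pairs.
def _example_argmin_argmax():
    assert argmin([4, 1, 7]) == 1
    assert argmax([4, 1, 7]) == 2
    assert argmax2({"a": 3, "b": 9}.items(), return_value=True) == ("b", 9)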
# {{{ cartesian products etc.
def cartesian_product(list1, list2):
for i in list1:
for j in list2:
yield (i, j)
def distinct_pairs(list1, list2):
for i, xi in enumerate(list1):
for j, yj in enumerate(list2):
if i != j:
yield (xi, yj)
def cartesian_product_sum(list1, list2):
"""This routine returns a list of sums of each element of
list1 with each element of list2. Also works with lists.
"""
for i in list1:
for j in list2:
yield i+j
# }}}
# {{{ elementary statistics
def average(iterable):
"""Return the average of the values in iterable.
iterable may not be empty.
"""
it = iterable.__iter__()
try:
s = next(it)
count = 1
except StopIteration:
raise ValueError("empty average")
for value in it:
s = s + value
count += 1
return s/count
class VarianceAggregator:
"""Online variance calculator.
See http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
Adheres to pysqlite's aggregate interface.
"""
def __init__(self, entire_pop):
self.n = 0
self.mean = 0
self.m2 = 0
self.entire_pop = entire_pop
def step(self, x):
self.n += 1
delta_ = x - self.mean
self.mean += delta_/self.n
self.m2 += delta_*(x - self.mean)
def finalize(self):
if self.entire_pop:
if self.n == 0:
return None
else:
return self.m2/self.n
else:
if self.n <= 1:
return None
else:
return self.m2/(self.n - 1)
def variance(iterable, entire_pop):
v_comp = VarianceAggregator(entire_pop)
for x in iterable:
v_comp.step(x)
return v_comp.finalize()
def std_deviation(iterable, finite_pop):
from math import sqrt
return sqrt(variance(iterable, finite_pop))
# }}}
# {{{ permutations, tuples, integer sequences
def wandering_element(length, wanderer=1, landscape=0):
for i in range(length):
yield i*(landscape,) + (wanderer,) + (length-1-i)*(landscape,)
def indices_in_shape(shape):
if isinstance(shape, int):
shape = (shape,)
if not shape:
yield ()
elif len(shape) == 1:
for i in range(0, shape[0]):
yield (i,)
else:
remainder = shape[1:]
for i in range(0, shape[0]):
for rest in indices_in_shape(remainder):
yield (i,)+rest
def generate_nonnegative_integer_tuples_below(n, length=None, least=0):
"""n may be a sequence, in which case length must be None."""
if length is None:
if not n:
yield ()
return
my_n = n[0]
n = n[1:]
next_length = None
else:
my_n = n
assert length >= 0
if length == 0:
yield ()
return
next_length = length-1
for i in range(least, my_n):
my_part = (i,)
for base in generate_nonnegative_integer_tuples_below(n, next_length, least):
yield my_part + base
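# Illustrative sketch added for exposition (not part of the original module):
# enumerate every length-2 tuple with entries in range(3).
def _example_integer_tuples_below():
    tups = list(generate_nonnegative_integer_tuples_below(3, 2))
    assert len(tups) == 9
    assert (0, 0) in tups and (2, 2) in tups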
def generate_decreasing_nonnegative_tuples_summing_to(
n, length, min_value=0, max_value=None):
if length == 0:
yield ()
elif length == 1:
        if max_value is None or n <= max_value:
#print "MX", n, max_value
yield (n,)
else:
return
else:
if max_value is None or n < max_value:
max_value = n
for i in range(min_value, max_value+1):
#print "SIG", sig, i
for remainder in generate_decreasing_nonnegative_tuples_summing_to(
n-i, length-1, min_value, i):
yield (i,) + remainder
def generate_nonnegative_integer_tuples_summing_to_at_most(n, length):
"""Enumerate all non-negative integer tuples summing to at most n,
exhausting the search space by varying the first entry fastest,
and the last entry the slowest.
"""
assert length >= 0
if length == 0:
yield ()
else:
for i in range(n+1):
for remainder in generate_nonnegative_integer_tuples_summing_to_at_most(
n-i, length-1):
yield remainder + (i,)
def generate_all_nonnegative_integer_tuples(length, least=0):
assert length >= 0
current_max = least
while True:
for max_pos in range(length):
for prebase in generate_nonnegative_integer_tuples_below(
current_max, max_pos, least):
for postbase in generate_nonnegative_integer_tuples_below(
current_max+1, length-max_pos-1, least):
                    yield prebase + (current_max,) + postbase
current_max += 1
# backwards compatibility
generate_positive_integer_tuples_below = generate_nonnegative_integer_tuples_below
generate_all_positive_integer_tuples = generate_all_nonnegative_integer_tuples
def _pos_and_neg_adaptor(tuple_iter):
for tup in tuple_iter:
nonzero_indices = [i for i in range(len(tup)) if tup[i] != 0]
for do_neg_tup in generate_nonnegative_integer_tuples_below(
2, len(nonzero_indices)):
this_result = list(tup)
for index, do_neg in enumerate(do_neg_tup):
if do_neg:
this_result[nonzero_indices[index]] *= -1
yield tuple(this_result)
def generate_all_integer_tuples_below(n, length, least_abs=0):
return _pos_and_neg_adaptor(generate_nonnegative_integer_tuples_below(
n, length, least_abs))
def generate_all_integer_tuples(length, least_abs=0):
return _pos_and_neg_adaptor(generate_all_nonnegative_integer_tuples(
length, least_abs))
def generate_permutations(original):
"""Generate all permutations of the list *original*.
Nicked from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252178
"""
if len(original) <= 1:
yield original
else:
for perm_ in generate_permutations(original[1:]):
for i in range(len(perm_)+1):
#nb str[0:1] works in both string and list contexts
yield perm_[:i] + original[0:1] + perm_[i:]
def generate_unique_permutations(original):
"""Generate all unique permutations of the list *original*.
"""
    had_those = set()
    for perm_ in generate_permutations(original):
        perm_key = tuple(perm_)
        if perm_key not in had_those:
            had_those.add(perm_key)
            yield perm_
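# Illustrative sketch added for exposition (not part of the original module):
# 3 distinct elements give 3! permutations; repeated elements collapse.
def _example_permutations():
    assert len(list(generate_permutations((1, 2, 3)))) == 6
    assert len(list(generate_unique_permutations((1, 1, 2)))) == 3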
def enumerate_basic_directions(dimensions):
coordinate_list = [[0], [1], [-1]]
    return list(
            reduce(cartesian_product_sum, [coordinate_list] * dimensions))[1:]
# }}}
# {{{ index mangling
def get_read_from_map_from_permutation(original, permuted):
"""With a permutation given by C{original} and C{permuted},
generate a list C{rfm} of indices such that
C{permuted[i] == original[rfm[i]]}.
Requires that the permutation can be inferred from
C{original} and C{permuted}.
    .. doctest::
>>> for p1 in generate_permutations(range(5)):
... for p2 in generate_permutations(range(5)):
... rfm = get_read_from_map_from_permutation(p1, p2)
... p2a = [p1[rfm[i]] for i in range(len(p1))]
... assert p2 == p2a
"""
from warnings import warn
warn("get_read_from_map_from_permutation is deprecated and will be "
"removed in 2019", DeprecationWarning, stacklevel=2)
assert len(original) == len(permuted)
where_in_original = dict(
(original[i], i) for i in range(len(original)))
assert len(where_in_original) == len(original)
return tuple(where_in_original[pi] for pi in permuted)
def get_write_to_map_from_permutation(original, permuted):
"""With a permutation given by C{original} and C{permuted},
generate a list C{wtm} of indices such that
C{permuted[wtm[i]] == original[i]}.
Requires that the permutation can be inferred from
C{original} and C{permuted}.
.. doctest::
>>> for p1 in generate_permutations(range(5)):
... for p2 in generate_permutations(range(5)):
... wtm = get_write_to_map_from_permutation(p1, p2)
... p2a = [0] * len(p2)
... for i, oi in enumerate(p1):
... p2a[wtm[i]] = oi
... assert p2 == p2a
"""
from warnings import warn
warn("get_write_to_map_from_permutation is deprecated and will be "
"removed in 2019", DeprecationWarning, stacklevel=2)
assert len(original) == len(permuted)
where_in_permuted = dict(
(permuted[i], i) for i in range(len(permuted)))
assert len(where_in_permuted) == len(permuted)
return tuple(where_in_permuted[oi] for oi in original)
# }}}
# {{{ graph algorithms
def a_star( # pylint: disable=too-many-locals
initial_state, goal_state, neighbor_map,
estimate_remaining_cost=None,
get_step_cost=lambda x, y: 1
):
"""
With the default cost and heuristic, this amounts to Dijkstra's algorithm.
"""
from heapq import heappop, heappush
if estimate_remaining_cost is None:
def estimate_remaining_cost(x): # pylint: disable=function-redefined
if x != goal_state:
return 1
else:
return 0
    class AStarNode(object):
        __slots__ = ["state", "parent", "path_cost"]
        def __init__(self, state, parent, path_cost):
            self.state = state
            self.parent = parent
            self.path_cost = path_cost
        def __lt__(self, other):
            # Tie-breaker so that heap entries with equal estimated cost do
            # not fall back to comparing AStarNode instances (a TypeError
            # on Python 3).
            return self.path_cost < other.path_cost
inf = float("inf")
init_remcost = estimate_remaining_cost(initial_state)
assert init_remcost != inf
queue = [(init_remcost, AStarNode(initial_state, parent=None, path_cost=0))]
visited_states = set()
while queue:
_, top = heappop(queue)
visited_states.add(top.state)
if top.state == goal_state:
result = []
it = top
while it is not None:
result.append(it.state)
it = it.parent
return result[::-1]
for state in neighbor_map[top.state]:
if state in visited_states:
continue
remaining_cost = estimate_remaining_cost(state)
if remaining_cost == inf:
continue
step_cost = get_step_cost(top, state)
estimated_path_cost = top.path_cost+step_cost+remaining_cost
heappush(queue,
(estimated_path_cost,
AStarNode(state, top, path_cost=top.path_cost + step_cost)))
raise RuntimeError("no solution")
# }}}
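# Illustrative sketch added for exposition (not part of the original module);
# the chain graph below is hypothetical. With unit step costs and the default
# heuristic, a_star returns the node sequence from start to goal.
def _example_a_star():
    neighbors = {"a": ["b"], "b": ["a", "c"], "c": ["b"]}
    assert a_star("a", "c", neighbors) == ["a", "b", "c"]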
# {{{ formatting
# {{{ table formatting
class Table:
"""An ASCII table generator.
.. automethod:: add_row
.. automethod:: __str__
.. automethod:: latex
"""
def __init__(self):
self.rows = []
def add_row(self, row):
self.rows.append([str(i) for i in row])
def __str__(self):
columns = len(self.rows[0])
col_widths = [max(len(row[i]) for row in self.rows)
for i in range(columns)]
lines = [" | ".join([cell.ljust(col_width)
for cell, col_width in zip(row, col_widths)])
for row in self.rows]
lines[1:1] = ["+".join("-" * (col_width + 1 + (i > 0))
for i, col_width in enumerate(col_widths))]
return "\n".join(lines)
def latex(self, skip_lines=0, hline_after=None):
if hline_after is None:
hline_after = []
lines = []
for row_nr, row in list(enumerate(self.rows))[skip_lines:]:
lines.append(" & ".join(row)+r" \\")
if row_nr in hline_after:
lines.append(r"\hline")
return "\n".join(lines)
# }}}
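# Illustrative sketch added for exposition (not part of the original module):
# the first row added to a Table acts as the header and gets underlined.
def _example_table():
    t = Table()
    t.add_row(["n", "n**2"])
    t.add_row([2, 4])
    assert len(str(t).split("\n")) == 3      # header, rule, one data row
    assert t.latex(skip_lines=1) == r"2 & 4 \\"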
# {{{ histogram formatting
def string_histogram( # pylint: disable=too-many-arguments,too-many-locals
iterable, min_value=None, max_value=None,
bin_count=20, width=70, bin_starts=None, use_unicode=True):
if bin_starts is None:
if min_value is None or max_value is None:
iterable = list(iterable)
min_value = min(iterable)
max_value = max(iterable)
bin_width = (max_value - min_value)/bin_count
bin_starts = [min_value+bin_width*i for i in range(bin_count)]
bins = [0 for i in range(len(bin_starts))]
from bisect import bisect
for value in iterable:
        if ((max_value is not None and value > max_value)
                or value < bin_starts[0]):
from warnings import warn
warn("string_histogram: out-of-bounds value ignored")
else:
bin_nr = bisect(bin_starts, value)-1
try:
bins[bin_nr] += 1
except Exception:
print(value, bin_nr, bin_starts)
raise
from math import floor, ceil
if use_unicode:
def format_bar(cnt):
scaled = cnt*width/max_count
full = int(floor(scaled))
eighths = int(ceil((scaled-full)*8))
if eighths:
return full*six.unichr(0x2588) + six.unichr(0x2588+(8-eighths))
else:
return full*six.unichr(0x2588)
else:
def format_bar(cnt):
return int(ceil(cnt*width/max_count))*"#"
max_count = max(bins)
total_count = sum(bins)
return "\n".join("%9g |%9d | %3.0f %% | %s" % (
bin_start,
bin_value,
bin_value/total_count*100,
format_bar(bin_value))
for bin_start, bin_value in zip(bin_starts, bins))
# }}}
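# Illustrative sketch added for exposition (not part of the original module);
# the sample data is hypothetical. ASCII output avoids the unicode block
# characters used by default.
def _example_string_histogram():
    import random
    values = [random.gauss(0, 1) for _ in range(500)]
    print(string_histogram(values, bin_count=10, width=40, use_unicode=False))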
def word_wrap(text, width, wrap_using="\n"):
# http://code.activestate.com/recipes/148061-one-liner-word-wrap-function/
r"""
A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (``\n``).
"""
space_or_break = [" ", wrap_using]
return reduce(lambda line, word: '%s%s%s' %
(line,
space_or_break[(len(line)-line.rfind('\n')-1
+ len(word.split('\n', 1)[0])
>= width)],
word),
text.split(' ')
)
# }}}
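# Illustrative sketch added for exposition (not part of the original module);
# the sentence and the wrap width are hypothetical.
def _example_word_wrap():
    wrapped = word_wrap("the quick brown fox jumps over the lazy dog", 15)
    assert all(len(line) <= 15 for line in wrapped.split("\n"))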
# {{{ command line interfaces -------------------------------------------------
def _exec_arg(arg, execenv):
import os
if os.access(arg, os.F_OK):
exec(compile(open(arg, "r"), arg, 'exec'), execenv)
else:
exec(compile(arg, "<command line>", 'exec'), execenv)
class CPyUserInterface(object):
class Parameters(Record):
pass
def __init__(self, variables, constants=None, doc=None):
if constants is None:
constants = {}
if doc is None:
doc = {}
self.variables = variables
self.constants = constants
self.doc = doc
def show_usage(self, progname):
print("usage: %s <FILE-OR-STATEMENTS>" % progname)
print()
print("FILE-OR-STATEMENTS may either be Python statements of the form")
print("'variable1 = value1; variable2 = value2' or the name of a file")
print("containing such statements. Any valid Python code may be used")
print("on the command line or in a command file. If new variables are")
print("used, they must start with 'user_' or just '_'.")
print()
print("The following variables are recognized:")
for v in sorted(self.variables):
print(" %s = %s" % (v, self.variables[v]))
if v in self.doc:
print(" %s" % self.doc[v])
print()
print("The following constants are supplied:")
for c in sorted(self.constants):
print(" %s = %s" % (c, self.constants[c]))
if c in self.doc:
print(" %s" % self.doc[c])
def gather(self, argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1 or (
("-h" in argv)
or ("help" in argv)
or ("-help" in argv)
or ("--help" in argv)):
self.show_usage(argv[0])
sys.exit(2)
execenv = self.variables.copy()
execenv.update(self.constants)
for arg in argv[1:]:
_exec_arg(arg, execenv)
# check if the user set invalid keys
for added_key in (
set(execenv.keys())
- set(self.variables.keys())
- set(self.constants.keys())):
if not (added_key.startswith("user_") or added_key.startswith("_")):
raise ValueError(
"invalid setup key: '%s' "
"(user variables must start with 'user_' or '_')"
% added_key)
result = self.Parameters(dict((key, execenv[key]) for key in self.variables))
self.validate(result)
return result
def validate(self, setup):
pass
# }}}
# {{{ code maintenance
class MovedFunctionDeprecationWrapper:
def __init__(self, f):
self.f = f
def __call__(self, *args, **kwargs):
from warnings import warn
warn("This function is deprecated. Use %s.%s instead." % (
self.f.__module__, self.f.__name__),
DeprecationWarning, stacklevel=2)
return self.f(*args, **kwargs)
# }}}
# {{{ debugging
class StderrToStdout(object):
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.stderr_backup = sys.stderr
sys.stderr = sys.stdout
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stderr = self.stderr_backup
del self.stderr_backup
def typedump(val, max_seq=5, special_handlers=None):
if special_handlers is None:
special_handlers = {}
try:
hdlr = special_handlers[type(val)]
except KeyError:
pass
else:
return hdlr(val)
try:
len(val)
except TypeError:
return type(val).__name__
else:
if isinstance(val, dict):
return "{%s}" % (
", ".join(
"%r: %s" % (str(k), typedump(v))
for k, v in six.iteritems(val)))
try:
if len(val) > max_seq:
return "%s(%s,...)" % (
type(val).__name__,
",".join(typedump(x, max_seq, special_handlers)
for x in val[:max_seq]))
else:
return "%s(%s)" % (
type(val).__name__,
",".join(typedump(x, max_seq, special_handlers)
for x in val))
except TypeError:
return val.__class__.__name__
def invoke_editor(s, filename="edit.txt", descr="the file"):
from tempfile import mkdtemp
tempdir = mkdtemp()
from os.path import join
full_name = join(tempdir, filename)
outf = open(full_name, "w")
outf.write(str(s))
outf.close()
import os
if "EDITOR" in os.environ:
from subprocess import Popen
p = Popen([os.environ["EDITOR"], full_name])
os.waitpid(p.pid, 0)
else:
print("(Set the EDITOR environment variable to be "
"dropped directly into an editor next time.)")
input("Edit %s at %s now, then hit [Enter]:"
% (descr, full_name))
inf = open(full_name, "r")
result = inf.read()
inf.close()
return result
# }}}
# {{{ progress bars
class ProgressBar(object): # pylint: disable=too-many-instance-attributes
"""
.. automethod:: draw
.. automethod:: progress
.. automethod:: set_progress
.. automethod:: finished
.. automethod:: __enter__
.. automethod:: __exit__
"""
def __init__(self, descr, total, initial=0, length=40):
import time
self.description = descr
self.total = total
self.done = initial
self.length = length
self.last_squares = -1
self.start_time = time.time()
self.last_update_time = self.start_time
self.speed_meas_start_time = self.start_time
self.speed_meas_start_done = initial
self.time_per_step = None
def draw(self):
import time
now = time.time()
squares = int(self.done/self.total*self.length)
if squares != self.last_squares or now-self.last_update_time > 0.5:
if (self.done != self.speed_meas_start_done
and now-self.speed_meas_start_time > 3):
new_time_per_step = (now-self.speed_meas_start_time) \
/ (self.done-self.speed_meas_start_done)
if self.time_per_step is not None:
self.time_per_step = (new_time_per_step + self.time_per_step)/2
else:
self.time_per_step = new_time_per_step
self.speed_meas_start_time = now
self.speed_meas_start_done = self.done
if self.time_per_step is not None:
eta_str = "%7.1fs " % max(
0, (self.total-self.done) * self.time_per_step)
else:
eta_str = "?"
sys.stderr.write("%-20s [%s] ETA %s\r" % (
self.description,
squares*"#"+(self.length-squares)*" ",
eta_str))
self.last_squares = squares
self.last_update_time = now
def progress(self, steps=1):
self.set_progress(self.done + steps)
def set_progress(self, done):
self.done = done
self.draw()
def finished(self):
self.set_progress(self.total)
sys.stderr.write("\n")
def __enter__(self):
self.draw()
def __exit__(self, exc_type, exc_val, exc_tb):
self.finished()
# }}}
# {{{ file system related
def assert_not_a_file(name):
import os
if os.access(name, os.F_OK):
raise IOError("file `%s' already exists" % name)
def add_python_path_relative_to_script(rel_path):
from os.path import dirname, join, abspath
script_name = sys.argv[0]
rel_script_dir = dirname(script_name)
sys.path.append(abspath(join(rel_script_dir, rel_path)))
# }}}
# {{{ numpy dtype mangling
def common_dtype(dtypes, default=None):
dtypes = list(dtypes)
if dtypes:
return argmax2((dtype, dtype.num) for dtype in dtypes)
else:
if default is not None:
return default
else:
raise ValueError(
"cannot find common dtype of empty dtype list")
def to_uncomplex_dtype(dtype):
import numpy
if dtype == numpy.complex64:
return numpy.float32
elif dtype == numpy.complex128:
return numpy.float64
if dtype == numpy.float32:
return numpy.float32
elif dtype == numpy.float64:
return numpy.float64
else:
raise TypeError("unrecgonized dtype '%s'" % dtype)
def match_precision(dtype, dtype_to_match):
import numpy
tgt_is_double = dtype_to_match in [
numpy.float64, numpy.complex128]
dtype_is_complex = dtype.kind == "c"
if dtype_is_complex:
if tgt_is_double:
return numpy.dtype(numpy.complex128)
else:
return numpy.dtype(numpy.complex64)
else:
if tgt_is_double:
return numpy.dtype(numpy.float64)
else:
return numpy.dtype(numpy.float32)
# }}}
# {{{ unique name generation
def generate_unique_names(prefix):
yield prefix
try_num = 0
while True:
yield "%s_%d" % (prefix, try_num)
try_num += 1
def generate_numbered_unique_names(prefix, num=None):
if num is None:
yield (0, prefix)
num = 0
while True:
name = "%s_%d" % (prefix, num)
num += 1
yield (num, name)
generate_unique_possibilities = MovedFunctionDeprecationWrapper(
generate_unique_names)
class UniqueNameGenerator(object):
"""
.. automethod:: is_name_conflicting
.. automethod:: add_name
.. automethod:: add_names
.. automethod:: __call__
"""
def __init__(self, existing_names=None, forced_prefix=""):
if existing_names is None:
existing_names = set()
self.existing_names = existing_names.copy()
self.forced_prefix = forced_prefix
self.prefix_to_counter = {}
def is_name_conflicting(self, name):
return name in self.existing_names
def _name_added(self, name):
"""Callback to alert subclasses when a name has been added.
.. note::
This will not get called for the names in the *existing_names*
argument to :meth:`__init__`.
"""
pass
def add_name(self, name):
if self.is_name_conflicting(name):
raise ValueError("name '%s' conflicts with existing names")
if not name.startswith(self.forced_prefix):
raise ValueError("name '%s' does not start with required prefix")
self.existing_names.add(name)
self._name_added(name)
def add_names(self, names):
for name in names:
self.add_name(name)
def __call__(self, based_on="id"):
based_on = self.forced_prefix + based_on
counter = self.prefix_to_counter.get(based_on, None)
for counter, var_name in generate_numbered_unique_names(based_on, counter):
if not self.is_name_conflicting(var_name):
break
self.prefix_to_counter[based_on] = counter
var_name = intern(var_name) # pylint: disable=undefined-loop-variable
self.existing_names.add(var_name)
self._name_added(var_name)
return var_name
# }}}
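# Illustrative sketch added for exposition (not part of the original module):
# repeated requests for the same base name get numbered suffixes.
def _example_unique_names():
    ng = UniqueNameGenerator()
    assert ng("tmp") == "tmp"
    assert ng("tmp") == "tmp_0"
    ng.add_name("tmp_1")
    assert ng("tmp") == "tmp_2"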
# {{{ recursion limit
class MinRecursionLimit(object):
def __init__(self, min_rec_limit):
self.min_rec_limit = min_rec_limit
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.prev_recursion_limit = sys.getrecursionlimit()
new_limit = max(self.prev_recursion_limit, self.min_rec_limit)
sys.setrecursionlimit(new_limit)
def __exit__(self, exc_type, exc_val, exc_tb):
# Deep recursion can produce deeply nested data structures
        # (or long chains of to-be gc'd generators) that cannot
# undergo garbage collection with a lower recursion limit.
#
# As a result, it doesn't seem possible to lower the recursion limit
# again after it has been raised without causing reliability issues.
#
# See https://gitlab.tiker.net/inducer/sumpy/issues/31 for
# context.
pass
# }}}
# {{{ download from web if not present
def download_from_web_if_not_present(url, local_name=None):
"""
.. versionadded:: 2017.5
"""
from os.path import basename, exists
if local_name is None:
local_name = basename(url)
if not exists(local_name):
from six.moves.urllib.request import urlopen
with urlopen(url) as inf:
contents = inf.read()
with open(local_name, "wb") as outf:
outf.write(contents)
# }}}
# {{{ find git revisions
def find_git_revision(tree_root): # pylint: disable=too-many-locals
# Keep this routine self-contained so that it can be copy-pasted into
# setup.py.
from os.path import join, exists, abspath
tree_root = abspath(tree_root)
if not exists(join(tree_root, ".git")):
return None
# construct minimal environment
# stolen from
# https://github.com/numpy/numpy/blob/055ce3e90b50b5f9ef8cf1b8641c42e391f10735/setup.py#L70-L92
import os
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
from subprocess import Popen, PIPE, STDOUT
p = Popen(["git", "rev-parse", "HEAD"], shell=False,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True,
cwd=tree_root, env=env)
(git_rev, _) = p.communicate()
if sys.version_info >= (3,):
git_rev = git_rev.decode()
git_rev = git_rev.rstrip()
retcode = p.returncode
assert retcode is not None
if retcode != 0:
from warnings import warn
warn("unable to find git revision")
return None
return git_rev
def find_module_git_revision(module_file, n_levels_up):
from os.path import dirname, join
tree_root = join(*([dirname(module_file)] + [".." * n_levels_up]))
return find_git_revision(tree_root)
# }}}
# {{{ create a reshaped view of a numpy array
def reshaped_view(a, newshape):
""" Create a new view object with shape ``newshape`` without copying the data of
    ``a``. Unlike ``numpy.reshape``, this function raises an exception
    when a data copy would be necessary.
:arg a: a :class:`numpy.ndarray` object.
:arg newshape: an ``int`` object or a tuple of ``int`` objects.
.. versionadded:: 2018.4
"""
newview = a.view()
newview.shape = newshape
return newview
# }}}
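# Illustrative sketch added for exposition (not part of the original module):
# the returned view shares memory with the original array.
def _example_reshaped_view():
    import numpy as np
    a = np.arange(6)
    b = reshaped_view(a, (2, 3))
    b[0, 0] = 99
    assert a[0] == 99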
# {{{ process timer
SUPPORTS_PROCESS_TIME = (sys.version_info >= (3, 3))
class ProcessTimer(object):
"""Measures elapsed wall time and process time.
.. automethod:: __enter__
.. automethod:: __exit__
.. automethod:: done
Timing data attributes:
.. attribute:: wall_elapsed
.. attribute:: process_elapsed
Only available in Python 3.3+.
.. versionadded:: 2018.5
"""
def __init__(self):
import time
if SUPPORTS_PROCESS_TIME:
self.perf_counter_start = time.perf_counter()
self.process_time_start = time.process_time()
else:
import timeit
self.time_start = timeit.default_timer()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
def done(self):
# pylint: disable=attribute-defined-outside-init
import time
if SUPPORTS_PROCESS_TIME:
self.wall_elapsed = time.perf_counter() - self.perf_counter_start
self.process_elapsed = time.process_time() - self.process_time_start
else:
import timeit
self.wall_elapsed = timeit.default_timer() - self.time_start
self.process_elapsed = None
# }}}
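# Illustrative sketch added for exposition (not part of the original module);
# the timed workload is hypothetical.
def _example_process_timer():
    with ProcessTimer() as pt:
        sum(i * i for i in range(100000))
    assert pt.wall_elapsed >= 0
    if SUPPORTS_PROCESS_TIME:
        assert pt.process_elapsed is not None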
# {{{ log utilities
class ProcessLogger(object): # pylint: disable=too-many-instance-attributes
"""Logs the completion time of a (presumably) lengthy process to :mod:`logging`.
Only uses a high log level if the process took perceptible time.
.. automethod:: __init__
.. automethod:: done
.. automethod:: __enter__
.. automethod:: __exit__
"""
default_noisy_level = logging.INFO
def __init__( # pylint: disable=too-many-arguments
self, logger, description,
silent_level=None, noisy_level=None, long_threshold_seconds=None):
self.logger = logger
self.description = description
self.silent_level = silent_level or logging.DEBUG
self.noisy_level = noisy_level or self.default_noisy_level
self.long_threshold_seconds = (
# 0 is a valid value that should override the default
0.3 if long_threshold_seconds is None else long_threshold_seconds)
self.logger.log(self.silent_level, "%s: start", self.description)
self.is_done = False
import threading
self.late_start_log_thread = threading.Thread(target=self._log_start_if_long)
# Do not delay interpreter exit if thread not finished.
self.late_start_log_thread.daemon = True
# https://github.com/firedrakeproject/firedrake/issues/1422
# Starting a thread may irrecoverably break various environments,
# e.g. MPI.
#
# Since the late-start logging is an optional 'quality-of-life'
# feature for interactive use, do not do it unless there is (weak)
# evidence of interactive use.
import sys
if sys.stdin is None:
# Can happen, e.g., if pudb is controlling the console.
use_late_start_logging = False
else:
use_late_start_logging = sys.stdin.isatty()
import os
if os.environ.get("PYTOOLS_LOG_NO_THREADS", ""):
use_late_start_logging = False
if use_late_start_logging:
try:
self.late_start_log_thread.start()
except RuntimeError:
# https://github.com/firedrakeproject/firedrake/issues/1422
#
# Starting a thread may fail in various environments, e.g. MPI.
# Since the late-start logging is an optional 'quality-of-life'
# feature for interactive use, tolerate failures of it without
# warning.
pass
self.timer = ProcessTimer()
def _log_start_if_long(self):
from time import sleep
sleep_duration = 10*self.long_threshold_seconds
sleep(sleep_duration)
if not self.is_done:
self.logger.log(
self.noisy_level, "%s: started %.gs ago",
self.description,
sleep_duration)
def done( # pylint: disable=keyword-arg-before-vararg
self, extra_msg=None, *extra_fmt_args):
self.timer.done()
self.is_done = True
wall_elapsed = self.timer.wall_elapsed
process_elapsed = self.timer.process_elapsed
completion_level = (
self.noisy_level
if wall_elapsed > self.long_threshold_seconds
else self.silent_level)
if process_elapsed is not None:
msg = "%s: completed (%.2fs wall, %.1fx CPU)"
fmt_args = [self.description, wall_elapsed, process_elapsed/wall_elapsed]
else:
msg = "%s: completed (%f.2s wall)"
fmt_args = [self.description, wall_elapsed]
if extra_msg:
msg += ": " + extra_msg
fmt_args.extend(extra_fmt_args)
self.logger.log(completion_level, msg, *fmt_args)
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
class DebugProcessLogger(ProcessLogger):
default_noisy_level = logging.DEBUG
class log_process(object): # noqa: N801
"""A decorator that uses :class:`ProcessLogger` to log data about calls
to the wrapped function.
"""
def __init__(self, logger, description=None):
self.logger = logger
self.description = description
def __call__(self, wrapped):
def wrapper(*args, **kwargs):
with ProcessLogger(
self.logger,
self.description or wrapped.__name__):
return wrapped(*args, **kwargs)
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, wrapped)
return new_wrapper
# }}}
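# Illustrative sketch added for exposition (not part of the original module);
# the logger name and the decorated function are hypothetical.
def _example_log_process():
    example_logger = logging.getLogger("pytools.example")
    @log_process(example_logger, "assemble example data")
    def assemble():
        return [i ** 2 for i in range(1000)]
    assemble()
    with ProcessLogger(example_logger, "explicit block"):
        sum(range(1000))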
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
# vim: foldmethod=marker
|
client.py
|
# Krishna Chaitanya Naragam
# 1001836274
from tkinter import *
from datetime import datetime
import threading
import random
import string
import requests
# methods
# generate a random string of length specified
# https://pynative.com/python-generate-random-string/
def get_random_string(length):
# Random string with the combination of lower and upper case
letters = string.ascii_letters
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
# load the upload messages UI
def upload_messages_ui():
# UI for uploading messages
global upload_status_label
upload = Tk()
# upload.geometry("370x150")
upload.title("Upload Message")
upload_status_label = Label(upload, text="")
upload_status_label.pack()
q = StringVar(upload)
q.set("A") # default value
w = OptionMenu(upload, q, "A", "B", "C") # dropdown
w.pack()
number_enter_label = Label(upload, text="Enter a number to send:")
number_enter_label.pack()
number_enter_box = Entry(upload)
number_enter_box.pack()
send_number_button = Button(upload, text="Send to Queue", command=lambda: send_metric_to_server(q.get(),number_enter_box.get()))
send_number_button.pack()
quit_button = Button(upload, command=lambda: upload.destroy(), text="Close")
quit_button.pack()
upload.mainloop()
# load the receive messages UI
def check_messages_ui():
global check_messages_status_label
check = Tk()
# check.geometry("370x130")
check.title("Check for messages")
check_messages_status_label = Label(check, text="")
check_messages_status_label.pack()
q = StringVar(check)
q.set("A") # default value
w = OptionMenu(check, q, "A", "B", "C") # dropdown
w.pack()
number_enter_label = Label(check, text="Select queue and click Check for messages")
number_enter_label.pack()
send_number_button = Button(check, text="Check for messages", command=lambda: poll_queue(q.get()))
send_number_button.pack()
quit_button = Button(check, command=lambda: check.destroy(), text="Close")
quit_button.pack()
check.mainloop()
# set upload label
def set_upload_ui_label(data):
global upload_status_label
upload_status_label.config(text=str(data))
# set check messages label
def set_check_messages_ui_label(data):
global check_messages_status_label
check_messages_status_label.config(text=str(data))
# Send a number to server
def send_metric_to_server(q, metric):
try:
response = requests.get('http://{}:{}/putInQueue/{}/{}/{}'.format(host, port, user, q, metric))
print('http://{}:{}/putInQueue/{}/{}/{}'.format(host, port, user, q, metric))
print(response.text)
set_upload_ui_label(response.text)
except Exception as e:
set_upload_ui_label("Server not found!!")
# receive from queue
def poll_queue(q):
try:
response = requests.get('http://{}:{}/getQueue/{}/{}'.format(host, port, user, q))
print('http://{}:{}/getQueue/{}/{}'.format(host, port, user, q))
print(response.text)
set_check_messages_ui_label(response.text)
except Exception as e:
set_check_messages_ui_label("Server not found!!")
# utility method to start a popup UI in a new thread
def start_new_ui(i):
if i == 1:
t = threading.Thread(target=check_messages_ui)
t.daemon = True
t.start()
elif i == 0:
t = threading.Thread(target=upload_messages_ui)
t.daemon = True
t.start()
# Quit functionality for GUI
def quit():
global root
root.destroy()
# Initial GUI
host = "localhost"
port = 5000
user = get_random_string(5)
# GUI widgets
root = Tk()
root.title(user)
send_number_button = Button(root, text="Upload Message", command=lambda: start_new_ui(0))
send_number_button.pack()
poll_queue_button = Button(root, text="Check for messages", command=lambda: start_new_ui(1))
poll_queue_button.pack()
quit_button = Button(root, command=quit, text="Quit")
quit_button.pack()
# GUI loop
root.mainloop()
|
DialogPluginManager.py
|
'''
Created on March 1, 2012
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
based on pull request 4
'''
from tkinter import Toplevel, font, messagebox, VERTICAL, HORIZONTAL, N, S, E, W
from tkinter.constants import DISABLED, ACTIVE
try:
from tkinter.ttk import Treeview, Scrollbar, Frame, Label, Button
except ImportError:
from ttk import Treeview, Scrollbar, Frame, Label, Button
from arelle import PluginManager, DialogURL
from arelle.CntlrWinTooltip import ToolTip
import os, time
try:
import regex as re
except ImportError:
import re
EMPTYLIST = []
GROUPSEP = '\x01d'
def dialogPluginManager(mainWin):
# check for updates in background
import threading
thread = threading.Thread(target=lambda cntlr=mainWin: backgroundCheckForUpdates(cntlr))
thread.daemon = True
thread.start()
def backgroundCheckForUpdates(cntlr):
cntlr.showStatus(_("Checking for updates to plug-ins")) # clear web loading status
modulesWithNewerFileDates = PluginManager.modulesWithNewerFileDates()
if modulesWithNewerFileDates:
cntlr.showStatus(_("Updates are available for these plug-ins: {0}")
.format(', '.join(modulesWithNewerFileDates)), clearAfter=5000)
else:
cntlr.showStatus(_("No updates found for plug-ins."), clearAfter=5000)
time.sleep(0.1) # Mac locks up without this, may be needed for empty ui queue?
cntlr.uiThreadQueue.put((DialogPluginManager, [cntlr, modulesWithNewerFileDates]))
class DialogPluginManager(Toplevel):
def __init__(self, mainWin, modulesWithNewerFileDates):
super(DialogPluginManager, self).__init__(mainWin.parent)
self.ENABLE = _("Enable")
self.DISABLE = _("Disable")
self.parent = mainWin.parent
self.cntlr = mainWin
# copy plugins for temporary display
self.pluginConfig = PluginManager.pluginConfig
self.pluginConfigChanged = False
self.uiClassMethodsChanged = False
self.modelClassesChanged = False
self.disclosureSystemTypesChanged = False
self.modulesWithNewerFileDates = modulesWithNewerFileDates
        parentGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", self.parent.geometry())
dialogX = int(parentGeometry.group(3))
dialogY = int(parentGeometry.group(4))
self.title(_("Plug-in Manager"))
frame = Frame(self)
# left button frame
buttonFrame = Frame(frame, width=40)
buttonFrame.columnconfigure(0, weight=1)
addLabel = Label(buttonFrame, text=_("Find plug-in modules:"), wraplength=60, justify="center")
addLocalButton = Button(buttonFrame, text=_("Locally"), command=self.findLocally)
ToolTip(addLocalButton, text=_("File chooser allows selecting python module files to add (or reload) plug-ins, from the local file system."), wraplength=240)
addWebButton = Button(buttonFrame, text=_("On Web"), command=self.findOnWeb)
ToolTip(addWebButton, text=_("Dialog to enter URL full path to load (or reload) plug-ins, from the web or local file system."), wraplength=240)
addLabel.grid(row=0, column=0, pady=4)
addLocalButton.grid(row=1, column=0, pady=4)
addWebButton.grid(row=2, column=0, pady=4)
buttonFrame.grid(row=0, column=0, rowspan=2, sticky=(N, S, W), padx=3, pady=3)
# right tree frame (plugins already known to arelle)
modulesFrame = Frame(frame, width=700)
vScrollbar = Scrollbar(modulesFrame, orient=VERTICAL)
hScrollbar = Scrollbar(modulesFrame, orient=HORIZONTAL)
self.modulesView = Treeview(modulesFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=7)
self.modulesView.grid(row=0, column=0, sticky=(N, S, E, W))
self.modulesView.bind('<<TreeviewSelect>>', self.moduleSelect)
hScrollbar["command"] = self.modulesView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.modulesView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
modulesFrame.columnconfigure(0, weight=1)
modulesFrame.rowconfigure(0, weight=1)
modulesFrame.grid(row=0, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.modulesView.focus_set()
self.modulesView.column("#0", width=120, anchor="w")
self.modulesView.heading("#0", text=_("Name"))
self.modulesView["columns"] = ("author", "ver", "status", "date", "update", "descr", "license")
self.modulesView.column("author", width=100, anchor="w", stretch=False)
self.modulesView.heading("author", text=_("Author"))
self.modulesView.column("ver", width=50, anchor="w", stretch=False)
self.modulesView.heading("ver", text=_("Version"))
self.modulesView.column("status", width=50, anchor="w", stretch=False)
self.modulesView.heading("status", text=_("Status"))
self.modulesView.column("date", width=70, anchor="w", stretch=False)
self.modulesView.heading("date", text=_("File Date"))
self.modulesView.column("update", width=50, anchor="w", stretch=False)
self.modulesView.heading("update", text=_("Update"))
self.modulesView.column("descr", width=200, anchor="w", stretch=False)
self.modulesView.heading("descr", text=_("Description"))
self.modulesView.column("license", width=70, anchor="w", stretch=False)
self.modulesView.heading("license", text=_("License"))
classesFrame = Frame(frame)
vScrollbar = Scrollbar(classesFrame, orient=VERTICAL)
hScrollbar = Scrollbar(classesFrame, orient=HORIZONTAL)
self.classesView = Treeview(classesFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=5)
self.classesView.grid(row=0, column=0, sticky=(N, S, E, W))
hScrollbar["command"] = self.classesView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.classesView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
classesFrame.columnconfigure(0, weight=1)
classesFrame.rowconfigure(0, weight=1)
classesFrame.grid(row=1, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)
self.classesView.focus_set()
self.classesView.column("#0", width=200, anchor="w")
self.classesView.heading("#0", text=_("Class"))
self.classesView["columns"] = ("modules",)
self.classesView.column("modules", width=500, anchor="w", stretch=False)
self.classesView.heading("modules", text=_("Modules"))
# bottom frame module info details
moduleInfoFrame = Frame(frame, width=700)
moduleInfoFrame.columnconfigure(1, weight=1)
self.moduleNameLabel = Label(moduleInfoFrame, wraplength=600, justify="left",
font=font.Font(family='Helvetica', size=12, weight='bold'))
self.moduleNameLabel.grid(row=0, column=0, columnspan=4, sticky=W)
self.moduleAuthorHdr = Label(moduleInfoFrame, text=_("author:"), state=DISABLED)
self.moduleAuthorHdr.grid(row=1, column=0, sticky=W)
self.moduleAuthorLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleAuthorLabel.grid(row=1, column=1, columnspan=3, sticky=W)
self.moduleDescrHdr = Label(moduleInfoFrame, text=_("description:"), state=DISABLED)
self.moduleDescrHdr.grid(row=2, column=0, sticky=W)
self.moduleDescrLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleDescrLabel.grid(row=2, column=1, columnspan=3, sticky=W)
self.moduleClassesHdr = Label(moduleInfoFrame, text=_("classes:"), state=DISABLED)
self.moduleClassesHdr.grid(row=3, column=0, sticky=W)
self.moduleClassesLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleClassesLabel.grid(row=3, column=1, columnspan=3, sticky=W)
ToolTip(self.moduleClassesLabel, text=_("List of classes that this plug-in handles."), wraplength=240)
self.moduleUrlHdr = Label(moduleInfoFrame, text=_("URL:"), state=DISABLED)
self.moduleUrlHdr.grid(row=4, column=0, sticky=W)
self.moduleUrlLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleUrlLabel.grid(row=4, column=1, columnspan=3, sticky=W)
ToolTip(self.moduleUrlLabel, text=_("URL of plug-in module (local file path or web loaded file)."), wraplength=240)
self.moduleDateHdr = Label(moduleInfoFrame, text=_("date:"), state=DISABLED)
self.moduleDateHdr.grid(row=5, column=0, sticky=W)
self.moduleDateLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleDateLabel.grid(row=5, column=1, columnspan=3, sticky=W)
        ToolTip(self.moduleDateLabel, text=_("Date of currently loaded module file (with parenthetical note when an update is available)."), wraplength=240)
self.moduleLicenseHdr = Label(moduleInfoFrame, text=_("license:"), state=DISABLED)
self.moduleLicenseHdr.grid(row=6, column=0, sticky=W)
self.moduleLicenseLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleLicenseLabel.grid(row=6, column=1, columnspan=3, sticky=W)
self.moduleImportsHdr = Label(moduleInfoFrame, text=_("imports:"), state=DISABLED)
self.moduleImportsHdr.grid(row=7, column=0, sticky=W)
self.moduleImportsLabel = Label(moduleInfoFrame, wraplength=600, justify="left")
self.moduleImportsLabel.grid(row=7, column=1, columnspan=3, sticky=W)
self.moduleEnableButton = Button(moduleInfoFrame, text=self.ENABLE, state=DISABLED, command=self.moduleEnable)
ToolTip(self.moduleEnableButton, text=_("Enable/disable plug in."), wraplength=240)
self.moduleEnableButton.grid(row=8, column=1, sticky=E)
self.moduleReloadButton = Button(moduleInfoFrame, text=_("Reload"), state=DISABLED, command=self.moduleReload)
ToolTip(self.moduleReloadButton, text=_("Reload/update plug in."), wraplength=240)
self.moduleReloadButton.grid(row=8, column=2, sticky=E)
self.moduleRemoveButton = Button(moduleInfoFrame, text=_("Remove"), state=DISABLED, command=self.moduleRemove)
ToolTip(self.moduleRemoveButton, text=_("Remove plug in from plug in table (does not erase the plug in's file)."), wraplength=240)
self.moduleRemoveButton.grid(row=8, column=3, sticky=E)
moduleInfoFrame.grid(row=2, column=0, columnspan=5, sticky=(N, S, E, W), padx=3, pady=3)
moduleInfoFrame.config(borderwidth=4, relief="groove")
okButton = Button(frame, text=_("Close"), command=self.ok)
ToolTip(okButton, text=_("Accept changes (if any) and close dialog."), wraplength=240)
cancelButton = Button(frame, text=_("Cancel"), command=self.close)
ToolTip(cancelButton, text=_("Cancel changes (if any) and close dialog."), wraplength=240)
okButton.grid(row=3, column=3, sticky=(S,E), pady=3)
cancelButton.grid(row=3, column=4, sticky=(S,E), pady=3, padx=3)
enableDisableFrame = Frame(frame)
enableDisableFrame.grid(row=3, column=1, sticky=(S,W), pady=3)
enableAllButton = Button(enableDisableFrame, text=_("Enable All"), command=self.enableAll)
ToolTip(enableAllButton, text=_("Enable all plug ins."), wraplength=240)
disableAllButton = Button(enableDisableFrame, text=_("Disable All"), command=self.disableAll)
ToolTip(disableAllButton, text=_("Disable all plug ins."), wraplength=240)
enableAllButton.grid(row=1, column=1)
disableAllButton.grid(row=1, column=2)
self.loadTreeViews()
self.geometry("+{0}+{1}".format(dialogX+50,dialogY+100))
frame.grid(row=0, column=0, sticky=(N,S,E,W))
frame.columnconfigure(0, weight=0)
frame.columnconfigure(1, weight=1)
frame.rowconfigure(0, weight=1)
window = self.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.close)
self.protocol("WM_DELETE_WINDOW", self.close)
self.grab_set()
self.wait_window(self)
def loadTreeViews(self):
self.selectedModule = None
# clear previous treeview entries
for previousNode in self.modulesView.get_children(""):
self.modulesView.delete(previousNode)
def loadSubtree(parentNode, moduleItems):
for moduleItem in sorted(moduleItems, key=lambda item: item[0]):
moduleInfo = moduleItem[1]
if parentNode or not moduleInfo.get("isImported"):
nodeName = moduleItem[0]
if parentNode:
nodeName = parentNode + GROUPSEP + nodeName
name = moduleInfo.get("name", nodeName)
node = self.modulesView.insert(parentNode, "end", nodeName, text=name)
self.modulesView.set(node, "author", moduleInfo.get("author"))
self.modulesView.set(node, "ver", moduleInfo.get("version"))
self.modulesView.set(node, "status", moduleInfo.get("status"))
self.modulesView.set(node, "date", moduleInfo.get("fileDate"))
if name in self.modulesWithNewerFileDates:
self.modulesView.set(node, "update", _("available"))
self.modulesView.set(node, "descr", moduleInfo.get("description"))
self.modulesView.set(node, "license", moduleInfo.get("license"))
if moduleInfo.get("imports"):
loadSubtree(node, [(importModuleInfo["name"],importModuleInfo)
for importModuleInfo in moduleInfo["imports"]])
loadSubtree("", self.pluginConfig.get("modules", {}).items())
# clear previous treeview entries
for previousNode in self.classesView.get_children(""):
self.classesView.delete(previousNode)
for i, classItem in enumerate(sorted(self.pluginConfig.get("classes", {}).items())):
className, moduleList = classItem
node = self.classesView.insert("", "end", className, text=className)
self.classesView.set(node, "modules", ', '.join(moduleList))
self.moduleSelect() # clear out prior selection
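# Note on the two tree views populated above: modulesView is hierarchical -- one top-level row
# per configured plug-in, with any plug-ins it "imports" nested beneath it; nested node ids are
# built as parentNode + GROUPSEP + name (see loadSubtree), which is why moduleSelect() only keeps
# the last GROUPSEP-delimited part when resolving a selection back to a module name.
# classesView is flat: one row per class-method name listing the modules registered for it, e.g.
# a hypothetical "CntlrWinMain.Menu.Tools" row whose "modules" column reads "PluginA, PluginB"
# (the actual class-method names depend on the installed plug-ins).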
def ok(self, event=None):
if self.pluginConfigChanged:
PluginManager.pluginConfig = self.pluginConfig
PluginManager.pluginConfigChanged = True
PluginManager.reset() # force reloading of modules
if self.uiClassMethodsChanged or self.modelClassesChanged or self.disclosureSystemTypesChanged: # may require reloading UI
affectedItems = ""
if self.uiClassMethodsChanged:
affectedItems += _("menus of the user interface")
if self.modelClassesChanged:
if self.uiClassMethodsChanged:
affectedItems += _(" and ")
affectedItems += _("model objects of the processor")
if self.disclosureSystemTypesChanged:
if (self.uiClassMethodsChanged or self.modelClassesChanged):
affectedItems += _(" and ")
affectedItems += _("disclosure system types")
if messagebox.askyesno(_("User interface plug-in change"),
_("A change in plug-in class methods may have affected {0}. "
"Please restart Arelle to due to these changes. \n\n"
"Should Arelle restart itself now "
"(if there are any unsaved changes they would be lost!)?"
).format(affectedItems),
parent=self):
self.cntlr.uiThreadQueue.put((self.cntlr.quit, [None, True]))
self.close()
def close(self, event=None):
self.parent.focus_set()
self.destroy()
def moduleSelect(self, *args):
node = (self.modulesView.selection() or (None,))[0]
if node:
node = node.rpartition(GROUPSEP)[2] # drop leading path names for module name
moduleInfo = self.pluginConfig.get("modules", {}).get(node)
if moduleInfo:
self.selectedModule = node
name = moduleInfo["name"]
self.moduleNameLabel.config(text=name)
self.moduleAuthorHdr.config(state=ACTIVE)
self.moduleAuthorLabel.config(text=moduleInfo["author"])
self.moduleDescrHdr.config(state=ACTIVE)
self.moduleDescrLabel.config(text=moduleInfo["description"])
self.moduleClassesHdr.config(state=ACTIVE)
self.moduleClassesLabel.config(text=', '.join(moduleInfo["classMethods"]))
self.moduleUrlHdr.config(state=ACTIVE)
self.moduleUrlLabel.config(text=moduleInfo["moduleURL"])
self.moduleDateHdr.config(state=ACTIVE)
self.moduleDateLabel.config(text=moduleInfo["fileDate"] + " " +
(_("(an update is available)") if name in self.modulesWithNewerFileDates else ""))
self.moduleLicenseHdr.config(state=ACTIVE)
self.moduleLicenseLabel.config(text=moduleInfo["license"])
if moduleInfo.get("imports"):
self.moduleImportsHdr.config(state=ACTIVE)
_text = ", ".join(mi["name"] for mi in moduleInfo["imports"][:3])
if len(moduleInfo["imports"]) >= 3:
_text += ", ..."
self.moduleImportsLabel.config(text=_text)
_buttonState = DISABLED if moduleInfo.get("isImported") else ACTIVE
self.moduleEnableButton.config(state=_buttonState,
text={"enabled":self.DISABLE,
"disabled":self.ENABLE}[moduleInfo["status"]])
self.moduleReloadButton.config(state=_buttonState)
self.moduleRemoveButton.config(state=_buttonState)
else:
self.selectedModule = None
self.moduleNameLabel.config(text="")
self.moduleAuthorHdr.config(state=DISABLED)
self.moduleAuthorLabel.config(text="")
self.moduleDescrHdr.config(state=DISABLED)
self.moduleDescrLabel.config(text="")
self.moduleClassesHdr.config(state=DISABLED)
self.moduleClassesLabel.config(text="")
self.moduleUrlHdr.config(state=DISABLED)
self.moduleUrlLabel.config(text="")
self.moduleDateHdr.config(state=DISABLED)
self.moduleDateLabel.config(text="")
self.moduleLicenseHdr.config(state=DISABLED)
self.moduleLicenseLabel.config(text="")
self.moduleImportsHdr.config(state=DISABLED)
self.moduleImportsLabel.config(text="")
self.moduleEnableButton.config(state=DISABLED, text=self.ENABLE)
self.moduleReloadButton.config(state=DISABLED)
self.moduleRemoveButton.config(state=DISABLED)
def findLocally(self):
initialdir = self.cntlr.pluginDir # default plugin directory
if not self.cntlr.isMac: # can't navigate within app easily, always start in default directory
initialdir = self.cntlr.config.setdefault("pluginOpenDir", initialdir)
filename = self.cntlr.uiFileDialog("open",
parent=self,
title=_("Choose plug-in module file"),
initialdir=initialdir,
filetypes=[(_("Python files"), "*.py")],
defaultextension=".py")
if filename:
# check if a package is selected (any file in a directory containing an __init__.py)
#if (os.path.basename(filename) == "__init__.py" and os.path.isdir(os.path.dirname(filename)) and
# os.path.isfile(filename)):
# filename = os.path.dirname(filename) # refer to the package instead
self.cntlr.config["pluginOpenDir"] = os.path.dirname(filename)
moduleInfo = PluginManager.moduleModuleInfo(filename)
self.loadFoundModuleInfo(moduleInfo, filename)
def findOnWeb(self):
url = DialogURL.askURL(self)
if url: # url is the in-cache or local file
moduleInfo = PluginManager.moduleModuleInfo(url)
self.cntlr.showStatus("") # clear web loading status
self.loadFoundModuleInfo(moduleInfo, url)
def loadFoundModuleInfo(self, moduleInfo, url):
if moduleInfo and moduleInfo.get("name"):
self.addPluginConfigModuleInfo(moduleInfo)
self.loadTreeViews()
else:
messagebox.showwarning(_("Module is not itself a plug-in or in a directory with package __init__.py plug-in. "),
_("File does not itself contain a python program with an appropriate __pluginInfo__ declaration: \n\n{0}")
.format(url),
parent=self)
def checkIfImported(self, moduleInfo):
if moduleInfo.get("isImported"):
messagebox.showwarning(_("Plug-in is imported by a parent plug-in. "),
_("Plug-in has a parent, please request operation on the parent: \n\n{0}")
.format(moduleInfo.get("name")),
parent=self)
return True
return False
def removePluginConfigModuleInfo(self, name):
moduleInfo = self.pluginConfig["modules"].get(name)
if moduleInfo:
if self.checkIfImported(moduleInfo):
return
def _removePluginConfigModuleInfo(moduleInfo):
_name = moduleInfo.get("name")
if _name:
for classMethod in moduleInfo["classMethods"]:
classMethods = self.pluginConfig["classes"].get(classMethod)
if classMethods and _name in classMethods:
classMethods.remove(_name)
if not classMethods: # list has become unused
del self.pluginConfig["classes"][classMethod] # remove class
if classMethod.startswith("CntlrWinMain.Menu"):
self.uiClassMethodsChanged = True # may require reloading UI
elif classMethod == "ModelObjectFactory.ElementSubstitutionClasses":
self.modelClassesChanged = True # model object factor classes changed
elif classMethod == "DisclosureSystem.Types":
self.disclosureSystemTypesChanged = True # disclosure system types changed
for importModuleInfo in moduleInfo.get("imports", EMPTYLIST):
_removePluginConfigModuleInfo(importModuleInfo)
self.pluginConfig["modules"].pop(_name, None)
_removePluginConfigModuleInfo(moduleInfo)
self.pluginConfigChanged = True
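# Note: the removal above walks the module's "imports" recursively, so an imported sub-plug-in is
# deregistered from every class-method list along with its parent.  When a class-method list
# becomes empty the class entry itself is dropped, and the uiClassMethodsChanged /
# modelClassesChanged / disclosureSystemTypesChanged flags are raised so ok() can warn that a
# restart may be needed for the change to take effect.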
def addPluginConfigModuleInfo(self, moduleInfo):
if self.checkIfImported(moduleInfo):
return
name = moduleInfo.get("name")
self.removePluginConfigModuleInfo(name) # remove any prior entry for this module
def _addPlugin(moduleInfo):
_name = moduleInfo.get("name")
if _name:
self.modulesWithNewerFileDates.discard(_name) # no longer has an update available
self.pluginConfig["modules"][_name] = moduleInfo
# add classes
for classMethod in moduleInfo["classMethods"]:
classMethods = self.pluginConfig["classes"].setdefault(classMethod, [])
if _name not in classMethods:
classMethods.append(_name)
if classMethod.startswith("CntlrWinMain.Menu"):
self.uiClassMethodsChanged = True # may require reloading UI
elif classMethod == "ModelObjectFactory.ElementSubstitutionClasses":
self.modelClassesChanged = True # model object factor classes changed
elif classMethod == "DisclosureSystem.Types":
self.disclosureSystemTypesChanged = True # disclosure system types changed
for importModuleInfo in moduleInfo.get("imports", EMPTYLIST):
_addPlugin(importModuleInfo)
_addPlugin(moduleInfo)
self.pluginConfigChanged = True
def moduleEnable(self):
if self.selectedModule in self.pluginConfig["modules"]:
moduleInfo = self.pluginConfig["modules"][self.selectedModule]
if self.checkIfImported(moduleInfo):
return
def _moduleEnable(moduleInfo):
if self.moduleEnableButton['text'] == self.ENABLE:
moduleInfo["status"] = "enabled"
elif self.moduleEnableButton['text'] == self.DISABLE:
moduleInfo["status"] = "disabled"
for importModuleInfo in moduleInfo.get("imports", EMPTYLIST):
_moduleEnable(importModuleInfo)
_moduleEnable(moduleInfo)
if self.moduleEnableButton['text'] == self.ENABLE:
self.moduleEnableButton['text'] = self.DISABLE
elif self.moduleEnableButton['text'] == self.DISABLE:
self.moduleEnableButton['text'] = self.ENABLE
self.pluginConfigChanged = True
self.loadTreeViews()
def moduleReload(self):
if self.selectedModule in self.pluginConfig["modules"]:
url = self.pluginConfig["modules"][self.selectedModule].get("moduleURL")
if url:
moduleInfo = PluginManager.moduleModuleInfo(url, reload=True)
if moduleInfo:
if self.checkIfImported(moduleInfo):
return
self.addPluginConfigModuleInfo(moduleInfo)
self.loadTreeViews()
self.cntlr.showStatus(_("{0} reloaded").format(moduleInfo["name"]), clearAfter=5000)
else:
messagebox.showwarning(_("Module error"),
_("File or module cannot be reloaded: \n\n{0}")
.format(url),
parent=self)
def moduleRemove(self):
if self.selectedModule in self.pluginConfig["modules"]:
self.removePluginConfigModuleInfo(self.selectedModule)
self.pluginConfigChanged = True
self.loadTreeViews()
def enableAll(self):
self.enableDisableAll(True)
def disableAll(self):
self.enableDisableAll(False)
def enableDisableAll(self, doEnable):
for module in self.pluginConfig["modules"]:
if not module.get("isImported"):
moduleInfo = self.pluginConfig["modules"][module]
def _enableDisableAll(moduleInfo):
if doEnable:
moduleInfo["status"] = "enabled"
else:
moduleInfo["status"] = "disabled"
for importModuleInfo in moduleInfo.get("imports", EMPTYLIST):
_enableDisableAll(importModuleInfo)
_enableDisableAll(moduleInfo)
if doEnable:
self.moduleEnableButton['text'] = self.DISABLE
else:
self.moduleEnableButton['text'] = self.ENABLE
self.pluginConfigChanged = True
self.loadTreeViews()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electroncash import keystore
from electroncash.address import Address, ScriptOutput
from electroncash.bitcoin import COIN, TYPE_ADDRESS, TYPE_SCRIPT
from electroncash.networks import NetworkConstants
from electroncash.plugins import run_hook
from electroncash.i18n import _
from electroncash.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds, ExcessiveFee,
UserCancelled, bh2u, bfh, format_fee_satoshis)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands
from electroncash import paymentrequest
from electroncash.wallet import Multisig_Wallet, sweep_preparations
try:
from electroncash.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit, BTCSatsByteEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
cashaddr_toggled_signal = pyqtSignal()
history_updated_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.op_return_toolong = False
self.internalpluginsdialog = None
self.externalpluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tx_notify_timer = None
self.tl_windows = []
self.tx_external_keypairs = {}
Address.show_cashaddr(config.get('show_cashaddr', False))
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.converter_tab = self.create_converter_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.converter_tab, QIcon(":icons/tab_converter.png"), _("Address Converter"), "converter", True)
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electron-cash.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.cashaddr_toggled_signal.connect(self.update_cashaddr_icon)
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
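# Note on toggle_tab above: when re-showing a tab, the loop keeps the original tab order by
# inserting before the first currently visible tab whose tab_pos is larger; widgets without a
# tab_pos attribute are simply skipped (the AttributeError branch).  Illustrative example:
# with visible tab_pos values [0, 3, 4], re-showing a tab with tab_pos 2 inserts it at index 1.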
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
try:
traceback.print_exception(*exc_info)
except OSError:
# Issue #662, user got IO error.
# We want them to still get the error displayed to them.
pass
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
tx, wallet = args
if wallet == self.wallet: # filter out tx's not for this wallet
self.tx_notifications.append(tx)
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
pass
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = '%s %s - %s' % (NetworkConstants.TITLE,
self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoin Cash with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoin Cash to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
if not os.path.exists(wallet_folder):
wallet_folder = None
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
# Copy file contents
shutil.copyfile(path, new_path)
# Copy file attributes if possible
# (not supported on targets like Flatpak documents)
try:
shutil.copystat(path, new_path)
except (IOError, os.error):
pass
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent2 = []
for k in recent:
if os.path.exists(k):
recent2.append(k)
recent = recent2[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
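# Note: the "recently open" list is kept most-recently-used-first -- the current file is moved to
# the front, entries whose files no longer exist are dropped, and the list is capped at 5 before
# being written back to the config.  The menu itself lists the surviving entries alphabetically
# (sorted(recent)) and assigns Ctrl+1 .. Ctrl+5 shortcuts in that display order.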
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.converter_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are reserved keywords in OSX; use an alternate menu label as a workaround
tools_menu.addAction(_("Electron Cash preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("Optional &Features"), self.internal_plugins_dialog)
tools_menu.addAction(_("Installed &Plugins"), self.external_plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electroncash.org"))
help_menu.addSeparator()
help_menu.addAction(_("Documentation"), lambda: webbrowser.open("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('{}:{}?message=donation for {}'
.format(NetworkConstants.CASHADDR_PREFIX, d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electron Cash",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electron Cash's focus is speed, with low resource usage and simplifying Bitcoin Cash. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin Cash system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/Electron-Cash/Electron-Cash/issues\">https://github.com/Electron-Cash/Electron-Cash/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electron Cash (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electron Cash - " + _("Reporting Bugs"))
last_notify_tx_time = 0.0
notify_tx_rate = 30.0
def notify_tx_cb(self):
n_ok = 0
if self.network and self.network.is_connected() and self.wallet:
num_txns = len(self.tx_notifications)
if num_txns:
# Combine the transactions
total_amount = 0
for tx in self.tx_notifications:
if tx:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0 and is_relevant:
total_amount += v
n_ok += 1
if n_ok:
self.print_error("Notifying GUI %d tx"%(n_ok))
if n_ok > 1:
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(n_ok, self.format_amount_and_units(total_amount)))
else:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(total_amount)))
self.tx_notifications = list()
self.last_notify_tx_time = time.time() if n_ok else self.last_notify_tx_time
if self.tx_notify_timer:
self.tx_notify_timer.stop()
self.tx_notify_timer = None
def notify_transactions(self):
if self.tx_notify_timer or not len(self.tx_notifications) or self.cleaned_up:
# common case: extant notify timer -- we already enqueued to notify. So bail and wait for timer to handle it.
return
elapsed = time.time() - self.last_notify_tx_time
if elapsed < self.notify_tx_rate:
# spam control: don't let the tx notify popup appear more often than every notify_tx_rate (30) seconds;
# enqueue the request on a single-shot timer to handle it later
self.tx_notify_timer = QTimer(self)
self.tx_notify_timer.setSingleShot(True)
self.tx_notify_timer.timeout.connect(self.notify_tx_cb)
when = (self.notify_tx_rate - elapsed)
self.print_error("Notify spam control: will notify GUI of %d new tx's in %f seconds"%(len(self.tx_notifications),when))
self.tx_notify_timer.start(int(when * 1e3)) # QTimer.start() takes milliseconds as an int
else:
# it's been a while since we got a tx notify -- so do it immediately (no timer necessary)
self.notify_tx_cb()
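# Note on the spam control above: notify_transactions() fires once per batch of queued
# tx_notifications.  If the last popup was shown less than notify_tx_rate (30 s) ago, a
# single-shot QTimer defers notify_tx_cb() by the remaining time; otherwise it runs immediately.
# Worked example (illustrative): last popup 12 s ago -> elapsed = 12, so the timer is started for
# (30 - 12) s = 18 s, i.e. int(18 * 1e3) = 18000 ms.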
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electron Cash", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electron Cash", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'cash'
if self.decimal_point == 5:
return 'mBCH'
if self.decimal_point == 8:
return 'BCH'
raise Exception('Unknown base unit')
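# Note: decimal_point selects the display unit -- 2 -> 'cash', 5 -> 'mBCH', 8 -> 'BCH' -- and
# format_amount() scales satoshis accordingly.  Illustrative example: 1234567 satoshis shown with
# decimal_point 8 is 0.01234567 BCH; with decimal_point 5 it is 12.34567 mBCH.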
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
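# Note: connect_fields() keeps a BTC amount edit and a fiat edit in sync in both directions; the
# 'follows' flag suppresses the echo when one edit programmatically updates the other.
# Conversions, as coded above: satoshis = int(fiat / rate * COIN) and fiat = satoshis * rate / COIN.
# Illustrative example with an exchange rate of 250.00 per BCH: entering 10.00 in the fiat edit
# yields int(10 / 250 * 100000000) = 4000000 satoshis, i.e. 0.04 BCH.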
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
num_chains = len(self.network.get_blockchains())
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png") if num_chains <= 1 else QIcon(":icons/status_lagging_fork.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png") if num_chains <= 1 else QIcon(":icons/status_connected_fork.png")
else:
icon = QIcon(":icons/status_connected_proxy.png") if num_chains <= 1 else QIcon(":icons/status_connected_proxy_fork.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
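# Note on the status precedence above: "Offline" when there is no network or it is not running;
# when connected, "Synchronizing..." while the wallet is catching up or the server height is
# still 0 after a server switch, then "Server is lagging (N blocks)" when the server is more than
# one block behind our local headers, otherwise the confirmed balance plus any unconfirmed /
# unmatured amounts and an optional fiat total; "Not connected" covers a running network without
# a server connection.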
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin Cash address where the payment should be received. Note that each payment request uses a different Bitcoin Cash address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
self.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin Cash addresses.'),
_('The Bitcoin Cash address never expires and will always be part of this Electron Cash wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
URI = web.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
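# Note: the request URI built above starts from web.create_URI(addr, amount, message) and appends
# optional &time=, &exp= and, for signed requests, &name= / &sig= parameters (the signature is
# base58-encoded).  A hypothetical result looks roughly like
# bitcoincash:qq...?amount=0.04&message=invoice&time=1520000000&exp=3600&name=merchant&sig=...
# (the exact scheme, address form and amount encoding are whatever web.create_URI produces).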
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
if not self.receive_address:
self.show_error(_('No receiving address'))
return False
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.set_receive_address(self.wallet.get_receiving_address())
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.new_request_button.setEnabled(True)
self.update_receive_address_widget()
def update_receive_qr(self):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = web.create_URI(self.receive_address, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(self.receive_address_e.text(), amount,
message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin Cash address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin Cash address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg_opreturn = ( _('OP_RETURN data (optional).') + '\n\n'
+ _('Posts a PERMANENT note to the BCH blockchain as part of this transaction.')
+ '\n\n' + _('If you specify OP_RETURN text, you may leave the \'Pay to\' field blank.') )
self.opreturn_label = HelpLabel(_('OP_RETURN'), msg_opreturn)
grid.addWidget(self.opreturn_label, 3, 0)
self.message_opreturn_e = MyLineEdit()
grid.addWidget(self.message_opreturn_e, 3 , 1, 1, -1)
if not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText("")
self.message_opreturn_e.setHidden(True)
self.opreturn_label.setHidden(True)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 5, 0)
grid.addWidget(self.amount_e, 5, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 5, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 5, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 5, 4)
msg = _('Bitcoin Cash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
self.fee_custom_lbl = HelpLabel(self.get_custom_fee_text(),
_('This is the fee rate that will be used for this transaction.')
+ "\n\n" + _('It is calculated from the Custom Fee Rate in preferences, but can be overridden from the manual fee edit on this form (if enabled).')
+ "\n\n" + _('Generally, a fee of 1.0 sats/B is a good minimal rate to ensure your transaction will make it into the next block.'))
self.fee_custom_lbl.setFixedWidth(140)
self.fee_slider_mogrifier()
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
grid.addWidget(self.fee_e_label, 6, 0)
grid.addWidget(self.fee_slider, 6, 1)
grid.addWidget(self.fee_custom_lbl, 6, 1)
grid.addWidget(self.fee_e, 6, 2)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 7, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textChanged.connect(self.update_fee)
self.message_opreturn_e.editingFinished.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
opret_color = ColorScheme.DEFAULT
if self.op_return_toolong:
opret_color = ColorScheme.RED
text = _("OP_RETURN message too large, needs to be under 220 bytes") + (", " if text else "") + text
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.message_opreturn_e.setStyleSheet(opret_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textEdited.connect(entry_changed)
self.message_opreturn_e.editingFinished.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
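# "Max" spending: spend_max() flags is_max so do_update_fee() builds a sweep
# transaction and writes the resulting output value back into the amount field.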
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def get_custom_fee_text(self, fee_rate = None):
if not self.config.has_custom_fee_rate():
return ""
else:
if fee_rate is None: fee_rate = self.config.custom_fee_rate() / 1000.0
return str(round(fee_rate*100)/100) + " sats/B"
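# Build an OP_RETURN output for the given text: the UTF-8 payload is limited to
# 220 bytes and hex-encoded into an "OP_RETURN <hex>" script with a zero amount.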
@staticmethod
def output_for_opreturn_stringdata(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
op_return_code = "OP_RETURN "
op_return_encoded = op_return.encode('utf-8')
if len(op_return_encoded) > 220:
raise OPReturnTooLarge(_("OP_RETURN message too large, needs to be under 220 bytes"))
op_return_payload = op_return_encoded.hex()
script = op_return_code + op_return_payload
amount = 0
return (TYPE_SCRIPT, ScriptOutput.from_string(script), amount)
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.is_max else self.amount_e.get_amount()
fee_rate = None
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if opreturn_message:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
self.op_return_toolong = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except OPReturnTooLarge:
self.op_return_toolong = True
return
except OPReturnError as e:
self.statusBar().showMessage(str(e))
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is not None:
fee_rate = fee / tx.estimated_size()
self.fee_slider_mogrifier(self.get_custom_fee_text(fee_rate))
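# Toggle between the fee slider and the custom-fee label depending on whether a
# custom fee rate is configured, optionally updating the label text.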
def fee_slider_mogrifier(self, text = None):
fee_slider_hidden = self.config.has_custom_fee_rate()
self.fee_slider.setHidden(fee_slider_hidden)
self.fee_custom_lbl.setHidden(not fee_slider_hidden)
if text is not None: self.fee_custom_lbl.setText(text)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x['prevout_hash']
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
x['prevout_n'], x['address'])
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
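# Gather outputs, fee, label and coins from the Send tab, validating any active
# payment request, the 'Pay to' lines and the optional OP_RETURN message.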
def read_send_tab(self):
isInvoice = False
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
isInvoice = True
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
try:
# handle op_return if specified and enabled
opreturn_message = self.message_opreturn_e.text()
if opreturn_message:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
except OPReturnTooLarge as e:
self.show_error(str(e))
return
except OPReturnError as e:
self.show_error(str(e))
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins(isInvoice)
return outputs, fee, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 50 sat/byte."))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
#if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
#self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
#return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
# IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
#if fee > confirm_rate * tx.estimated_size() / 1000:
# msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if (fee < (tx.estimated_size())):
msg.append(_('Warning') + ': ' + _("You're using a fee less than 1000 sats/kb. It may take a very long time to confirm."))
if self.config.get('enable_opreturn') and self.message_opreturn_e.text():
msg.append(_("You are using an OP_RETURN message. This gets permanently written to the blockchain."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
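# Broadcast off the GUI thread: either through the payment request's send_payment()
# (marking the invoice paid on ACK) or directly via network.broadcast_transaction().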
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
status = False
msg = "Failed"
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
if pr:
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_payment(str(tx), refund_address)
msg = ack_msg
if ack_status:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
status = True
else:
status, msg = self.network.broadcast_transaction(tx)
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message(_("Invoice already paid"))
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr)
except Exception as e:
self.show_error(_('Invalid bitcoincash URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
op_return = out.get('op_return')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
if op_return:
self.message_opreturn_e.setText(op_return)
self.message_opreturn_e.setHidden(False)
self.opreturn_label.setHidden(False)
elif not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText('')
self.message_opreturn_e.setHidden(True)
self.opreturn_label.setHidden(True)
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.op_return_toolong = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.message_opreturn_e]:
e.setText('')
e.setFrozen(False)
self.max_button.setDisabled(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.message_opreturn_e.setVisible(self.config.get('enable_opreturn'))
self.opreturn_label.setVisible(self.config.get('enable_opreturn'))
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_coin_state(self, utxos, freeze):
self.wallet.set_frozen_coin_state(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_converter_tab(self):
source_address = QLineEdit()
cash_address = QLineEdit()
cash_address.setReadOnly(True)
legacy_address = QLineEdit()
legacy_address.setReadOnly(True)
widgets = [
(cash_address, Address.FMT_CASHADDR),
(legacy_address, Address.FMT_LEGACY),
]
def convert_address():
try:
addr = Address.from_string(source_address.text().strip())
except:
addr = None
for widget, fmt in widgets:
if addr:
widget.setText(addr.to_full_string(fmt))
else:
widget.setText('')
source_address.textChanged.connect(convert_address)
label = WWLabel(_(
"This tool helps convert between address formats for Bitcoin "
"Cash addresses.\nYou are encouraged to use the 'Cash address' "
"format."
))
w = QWidget()
grid = QGridLayout()
grid.setSpacing(15)
grid.setColumnStretch(1, 2)
grid.setColumnStretch(2, 1)
grid.addWidget(QLabel(_('Address to convert')), 0, 0)
grid.addWidget(source_address, 0, 1)
grid.addWidget(QLabel(_('Cash address')), 1, 0)
grid.addWidget(cash_address, 1, 1)
grid.addWidget(QLabel(_('Legacy address')), 2, 0)
grid.addWidget(legacy_address, 2, 1)
w.setLayout(grid)
vbox = QVBoxLayout()
vbox.addWidget(label)
vbox.addWidget(w)
vbox.addStretch(1)
w = QWidget()
w.setLayout(vbox)
return w
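# Generic tab wrapper: embeds a searchable list widget, optionally preceded by a
# row of header widgets.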
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr.to_ui_string())):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.clear_receive_tab()
def get_coins(self, isInvoice = False):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config, isInvoice)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not Address.is_valid(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
old_entry = self.contacts.get(address, None)
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.update_completions()
# The contact has changed, update any addresses that are displayed with the old information.
run_hook('update_contact', address, self.contacts[address], old_entry)
return True
def delete_contacts(self, addresses):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(addresses))):
return
removed_entries = []
for address in addresses:
if address in self.contacts.keys():
removed_entries.append((address, self.contacts[address]))
self.contacts.pop(address)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.contact_list.update()
self.update_completions()
run_hook('delete_contacts', removed_entries)
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, *args, password_getter=self.password_dialog,
**kwargs)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
self.addr_converter_button = StatusBarButton(
self.cashaddr_icon(),
_("Toggle CashAddr Display"),
self.toggle_cashaddr_status_bar
)
sb.addPermanentWidget(self.addr_converter_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
self.show_error(_("Wallet removed") + ': ' + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script().hex())
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electron Cash, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
if addr.kind != addr.ADDR_P2PKH:
self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + self.msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
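# Encrypt/decrypt a message against a public key; decryption requires the wallet
# password (via @protected) and runs on the wallet thread.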
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
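# Parse a transaction from raw text; inputs spending this wallet's own coins get
# their 'value' field filled in from the wallet's spendable coin list.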
def tx_from_text(self, txt):
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx)
tx.deserialize()
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
traceback.print_exc(file=sys.stdout)
self.show_critical(_("Electron Cash was unable to parse your transaction"))
return
def read_tx_from_qrcode(self):
from electroncash import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoincash URI
if data.lower().startswith(NetworkConstants.CASHADDR_PREFIX + ':'):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
file_content = file_content.strip()
tx_file_dict = json.loads(str(file_content))
tx = self.tx_from_text(file_content)
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self):
from electroncash import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
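# Private-key export runs in a background thread; progress is reported through
# computing_privkeys_signal and the final list shown via show_privkeys_signal.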
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electron-cash-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr.to_ui_string()] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join('{}\t{}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electron Cash was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
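# Write the exported keys either as CSV rows of (address, private_key) or as a
# JSON object keyed by address.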
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r') as f:
data = json.loads(f.read())
if type(data) is not dict or not all(type(k) is str and type(v) is str for k, v in data.items()):
self.show_critical(_("The file you selected does not appear to contain labels."))
return
for key, value in data.items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electron-cash-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electron Cash was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.export_history(fx=self.fx)
lines = []
for item in history:
if is_csv:
lines.append([item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['date']])
else:
lines.append(item)
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent=4))
def sweep_key_dialog(self):
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText())
def enable_sweep():
sweep_button.setEnabled(bool(get_address_text()
and get_priv_keys()))
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
if not d.exec_():
return
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_priv_keys(), self.network)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except BaseException as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def cashaddr_icon(self):
if self.config.get('show_cashaddr', False):
return QIcon(":icons/tab_converter.png")
else:
return QIcon(":icons/tab_converter_bw.png")
def update_cashaddr_icon(self):
self.addr_converter_button.setIcon(self.cashaddr_icon())
def toggle_cashaddr_status_bar(self):
self.toggle_cashaddr(not self.config.get('show_cashaddr', False))
def toggle_cashaddr_settings(self, state):
self.toggle_cashaddr(state == Qt.Checked)
def toggle_cashaddr(self, on):
self.config.set_key('show_cashaddr', on)
Address.show_cashaddr(on)
for window in self.gui_object.windows:
window.cashaddr_toggled_signal.emit()
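# Preferences dialog: settings are collected as (label, widget) pairs into per-tab
# lists (fees, transactions, appearance, fiat, identity) and laid out below.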
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
cashaddr_cb = QCheckBox(_('CashAddr address format'))
cashaddr_cb.setChecked(Address.FMT_UI == Address.FMT_CASHADDR)
cashaddr_cb.setToolTip(_("If unchecked, addresses are shown in legacy format"))
cashaddr_cb.stateChanged.connect(self.toggle_cashaddr_settings)
gui_widgets.append((cashaddr_cb, None))
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electroncash.i18n import languages
language_names = []
language_keys = []
for item in languages.items():
language_keys.append(item[0])
language_names.append(item[1])
lang_combo.addItems(language_names)
try:
index = language_keys.index(self.config.get("language",''))
except ValueError:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]:
w.setEnabled(False)
def on_lang(x):
lang_request = language_keys[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_customfee(x):
amt = customfee_e.get_amount()
m = int(amt * 1000.0) if amt is not None else None
self.config.set_key('customfee', m)
self.fee_slider.update()
self.fee_slider_mogrifier()
customfee_e = BTCSatsByteEdit()
customfee_e.setAmount(self.config.custom_fee_rate() / 1000.0 if self.config.has_custom_fee_rate() else None)
customfee_e.textChanged.connect(on_customfee)
customfee_label = HelpLabel(_('Custom Fee Rate'), _('Custom Fee Rate in Satoshis per byte'))
fee_widgets.append((customfee_label, customfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['BCH', 'mBCH', 'cash']
msg = _('Base unit of your wallet.')\
+ '\n1 BCH = 1,000 mBCH = 1,000,000 cash.\n' \
+ _('These settings affect the fields in the Send tab.')
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'BCH':
self.decimal_point = 8
elif unit_result == 'mBCH':
self.decimal_point = 5
elif unit_result == 'cash':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = web.BE_sorted_list()
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electroncash import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def on_opret(x):
self.config.set_key('enable_opreturn', bool(x))
if not x:
self.message_opreturn_e.setText("")
self.op_return_toolong = False
self.message_opreturn_e.setHidden(not x)
self.opreturn_label.setHidden(not x)
enable_opreturn = bool(self.config.get('enable_opreturn'))
opret_cb = QCheckBox(_('Enable OP_RETURN output'))
opret_cb.setToolTip(_('Enable posting messages with OP_RETURN.'))
opret_cb.setChecked(enable_opreturn)
opret_cb.stateChanged.connect(on_opret)
tx_widgets.append((opret_cb,None))
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electron Cash to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
if self.tx_notify_timer:
self.tx_notify_timer.stop()
self.tx_notify_timer = None
# We catch these errors with the understanding that there is no recovery at
# this point: the user has likely performed an action we cannot recover from
# cleanly, so we simply attempt to exit as cleanly as possible.
try:
self.config.set_key("is_maximized", self.isMaximized())
self.config.set_key("console-history", self.console.history[-50:], True)
except (OSError, PermissionError) as e:
self.print_error("unable to write to config (directory removed?)", e)
if not self.isMaximized():
try:
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
except (OSError, PermissionError) as e:
self.print_error("unable to write to wallet storage (directory removed?)", e)
# Should be no side-effects in this function relating to file access past this point.
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def internal_plugins_dialog(self):
self.internalpluginsdialog = d = WindowModalDialog(self, _('Optional Features'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.get_internal_plugin_count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle_internal_plugin(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# All plugins get this whenever one is toggled.
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.internal_plugin_metadata.values()):
name = descr['__name__']
p = plugins.get_internal_plugin(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_internal_plugin_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.internal_plugin_metadata.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def external_plugins_dialog(self):
from . import external_plugins_window
self.externalpluginsdialog = d = external_plugins_window.ExternalPluginsDialog(self, _('Plugin Manager'))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
fee_amount = fee_e.get_amount()
a = max_fee - fee_amount if fee_amount is not None else None
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None: return
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
if new_tx is None:
self.show_error(_('CPFP no longer valid'))
return
self.show_transaction(new_tx)
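# --- Illustrative sketch (not part of Electron Cash): the fee arithmetic the
# CPFP dialog above describes. The configured fee-per-kB rate is applied to
# the combined size of the parent and child transactions, and the result is
# capped at the value of the unconfirmed output being spent, so the child can
# never pay more in fees than it receives. The helper name is made up.
def cpfp_proposed_fee(fee_per_kb: float, total_size_bytes: int, max_fee: int) -> int:
    """Return a proposed CPFP fee in satoshis, never exceeding max_fee."""
    proposed = int(fee_per_kb * total_size_bytes / 1000)  # rate is per 1000 bytes
    return min(max_fee, proposed)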
|
utils.py
|
import threading
import numpy as np
import jesse.helpers as jh
from jesse.models.Candle import Candle
from jesse.models.Orderbook import Orderbook
from jesse.models.Ticker import Ticker
from jesse.models.Trade import Trade
def store_candle_into_db(exchange: str, symbol: str, candle: np.ndarray):
"""
store candle into the database
"""
d = {
'id': jh.generate_unique_id(),
'symbol': symbol,
'exchange': exchange,
'timestamp': candle[0],
'open': candle[1],
'high': candle[3],
'low': candle[4],
'close': candle[2],
'volume': candle[5]
}
def async_save():
Candle.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
'candle: {}-{}-{}: {}'.format(jh.timestamp_to_time(d['timestamp']), exchange, symbol, candle),
'blue'
)
)
# async call
threading.Thread(target=async_save).start()
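# A minimal usage sketch (illustrative values, not part of the original module).
# Jesse candles are 1-D numpy arrays laid out as
# [timestamp, open, close, high, low, volume], which is the order
# store_candle_into_db() maps into the Candle row above.
def _example_store_candle() -> None:
    candle = np.array([
        1609459200000,  # timestamp in milliseconds
        29000.0,        # open
        29100.0,        # close
        29250.0,        # high
        28950.0,        # low
        123.45,         # volume
    ])
    store_candle_into_db('Binance', 'BTC-USDT', candle)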
def store_ticker_into_db(exchange: str, symbol: str, ticker: np.ndarray):
d = {
'id': jh.generate_unique_id(),
'timestamp': ticker[0],
'last_price': ticker[1],
'high_price': ticker[2],
'low_price': ticker[3],
'volume': ticker[4],
'symbol': symbol,
'exchange': exchange,
}
def async_save():
Ticker.insert(**d).on_conflict_ignore().execute()
print(
jh.color('ticker: {}-{}-{}: {}'.format(
jh.timestamp_to_time(d['timestamp']), exchange, symbol, ticker
), 'yellow')
)
# async call
threading.Thread(target=async_save).start()
def store_trade_into_db(exchange: str, symbol: str, trade: np.ndarray):
d = {
'id': jh.generate_unique_id(),
'timestamp': trade[0],
'price': trade[1],
'buy_qty': trade[2],
'sell_qty': trade[3],
'buy_count': trade[4],
'sell_count': trade[5],
'symbol': symbol,
'exchange': exchange,
}
def async_save():
Trade.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
'trade: {}-{}-{}: {}'.format(
jh.timestamp_to_time(d['timestamp']), exchange, symbol, trade
),
'green'
)
)
# async call
threading.Thread(target=async_save).start()
def store_orderbook_into_db(exchange: str, symbol: str, orderbook: np.ndarray):
d = {
'id': jh.generate_unique_id(),
'timestamp': jh.now(),
'data': orderbook.dumps(),
'symbol': symbol,
'exchange': exchange,
}
def async_save():
Orderbook.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
'orderbook: {}-{}-{}: [{}, {}], [{}, {}]'.format(
jh.timestamp_to_time(d['timestamp']), exchange, symbol,
# best ask
orderbook[0][0][0], orderbook[0][0][1],
# best bid
orderbook[1][0][0], orderbook[1][0][1]
),
'magenta'
)
)
# async call
threading.Thread(target=async_save).start()
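# All four store_* functions above follow the same fire-and-forget pattern:
# build a plain dict, then hand the INSERT to a short-lived thread so the
# caller never blocks on the database. A generic version of that pattern might
# look like the sketch below; on_conflict_ignore() is the peewee call the
# models above already use, while everything else (including daemon=True,
# which the originals do not set) is an illustrative choice.
def _insert_async(model, values: dict) -> threading.Thread:
    """Fire-and-forget insert of `values` into the given peewee model."""
    def _save():
        model.insert(**values).on_conflict_ignore().execute()
    t = threading.Thread(target=_save, daemon=True)
    t.start()
    return t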
def fetch_candles_from_db(exchange: str, symbol: str, start_date: int, finish_date: int) -> tuple:
"""
Fetch the candles stored for a symbol/exchange between two timestamps
(inclusive), ordered by ascending timestamp, as a tuple of
(timestamp, open, close, high, low, volume) tuples.
"""
candles_tuple = tuple(
Candle.select(
Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
Candle.volume
).where(
Candle.timestamp.between(start_date, finish_date),
Candle.exchange == exchange,
Candle.symbol == symbol
).order_by(Candle.timestamp.asc()).tuples()
)
return candles_tuple
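# Companion sketch for fetch_candles_from_db() (illustrative timestamps): the
# query returns plain tuples in (timestamp, open, close, high, low, volume)
# order, so converting back to the numpy layout used elsewhere in Jesse is a
# one-liner.
def _example_fetch_candles() -> np.ndarray:
    start, finish = 1609459200000, 1609545600000  # one day, in milliseconds
    rows = fetch_candles_from_db('Binance', 'BTC-USDT', start, finish)
    return np.array(rows)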
|
test_client.py
|
import asyncio
from collections import deque
from contextlib import suppress
from functools import partial
import gc
import logging
from operator import add
import os
import pickle
import psutil
import random
import subprocess
import sys
import threading
from threading import Semaphore
from time import sleep
import traceback
import warnings
import weakref
import zipfile
import pytest
from tlz import identity, isdistinct, concat, pluck, valmap, first, merge
import dask
from dask import delayed
from dask.optimization import SubgraphCallable
import dask.bag as db
from distributed import (
Worker,
Nanny,
fire_and_forget,
LocalCluster,
get_client,
secede,
get_worker,
Executor,
profile,
performance_report,
TimeoutError,
CancelledError,
)
from distributed.comm import CommClosedError
from distributed.client import (
Client,
Future,
wait,
as_completed,
tokenize,
_get_global_client,
default_client,
futures_of,
temp_default_client,
)
from distributed.compatibility import WINDOWS
from distributed.metrics import time
from distributed.scheduler import Scheduler, KilledWorker
from distributed.sizeof import sizeof
from distributed.utils import (
mp_context,
sync,
tmp_text,
tokey,
tmpfile,
is_valid_xml,
)
from distributed.utils_test import (
cluster,
slowinc,
slowadd,
slowdec,
randominc,
inc,
dec,
div,
throws,
geninc,
asyncinc,
gen_cluster,
gen_test,
double,
popen,
captured_logger,
varying,
map_varying,
wait_for,
async_wait_for,
pristine_loop,
save_sys_modules,
)
from distributed.utils_test import ( # noqa: F401
client as c,
client_secondary as c2,
cleanup,
cluster_fixture,
loop,
loop_in_thread,
nodebug,
s,
a,
b,
)
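# A note on the fixtures imported above (added commentary): `client as c` is a
# synchronous Client connected to a small test cluster, while `s`, `a` and `b`
# expose that cluster's scheduler and two workers as plain info dicts (hence
# s["address"] / b["address"] in the synchronous tests below). The async tests
# instead use @gen_cluster(client=True), which starts an in-process scheduler
# plus two workers and passes (client, scheduler, worker_a, worker_b) into the
# coroutine, which is why they all take (c, s, a, b). A minimal test in that
# style would look roughly like:
#
#     @gen_cluster(client=True)
#     async def test_example(c, s, a, b):
#         assert await c.submit(inc, 1) == 2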
@gen_cluster(client=True, timeout=None)
async def test_submit(c, s, a, b):
x = c.submit(inc, 10)
assert not x.done()
assert isinstance(x, Future)
assert x.client is c
result = await x
assert result == 11
assert x.done()
y = c.submit(inc, 20)
z = c.submit(add, x, y)
result = await z
assert result == 11 + 21
s.validate_state()
@gen_cluster(client=True)
async def test_map(c, s, a, b):
L1 = c.map(inc, range(5))
assert len(L1) == 5
assert isdistinct(x.key for x in L1)
assert all(isinstance(x, Future) for x in L1)
result = await L1[0]
assert result == inc(0)
assert len(s.tasks) == 5
L2 = c.map(inc, L1)
result = await L2[1]
assert result == inc(inc(1))
assert len(s.tasks) == 10
# assert L1[0].key in s.tasks[L2[0].key]
total = c.submit(sum, L2)
result = await total
assert result == sum(map(inc, map(inc, range(5))))
L3 = c.map(add, L1, L2)
result = await L3[1]
assert result == inc(1) + inc(inc(1))
L4 = c.map(add, range(3), range(4))
results = await c.gather(L4)
assert results == list(map(add, range(3), range(4)))
def f(x, y=10):
return x + y
L5 = c.map(f, range(5), y=5)
results = await c.gather(L5)
assert results == list(range(5, 10))
y = c.submit(f, 10)
L6 = c.map(f, range(5), y=y)
results = await c.gather(L6)
assert results == list(range(20, 25))
s.validate_state()
@gen_cluster(client=True)
async def test_map_empty(c, s, a, b):
L1 = c.map(inc, [], pure=False)
assert len(L1) == 0
results = await c.gather(L1)
assert results == []
@gen_cluster(client=True)
async def test_map_keynames(c, s, a, b):
futures = c.map(inc, range(4), key="INC")
assert all(f.key.startswith("INC") for f in futures)
assert isdistinct(f.key for f in futures)
futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
assert [f.key for f in futures] != [f.key for f in futures2]
keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
futures = c.map(inc, range(4), key=keys)
assert [f.key for f in futures] == keys
@gen_cluster(client=True)
async def test_map_retries(c, s, a, b):
args = [
[ZeroDivisionError("one"), 2, 3],
[4, 5, 6],
[ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
]
x, y, z = c.map(*map_varying(args), retries=2)
assert await x == 2
assert await y == 4
assert await z == 9
x, y, z = c.map(*map_varying(args), retries=1, pure=False)
assert await x == 2
assert await y == 4
with pytest.raises(ZeroDivisionError, match="eight"):
await z
x, y, z = c.map(*map_varying(args), retries=0, pure=False)
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 4
with pytest.raises(ZeroDivisionError, match="seven"):
await z
@gen_cluster(client=True)
async def test_map_batch_size(c, s, a, b):
result = c.map(inc, range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(1, 101))
result = c.map(add, range(100), range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(0, 200, 2))
# mismatch shape
result = c.map(add, range(100, 200), range(10), batch_size=2)
result = await c.gather(result)
assert result == list(range(100, 120, 2))
@gen_cluster(client=True)
async def test_compute_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check for varying() use
x = c.compute(delayed(varying(args))())
with pytest.raises(ZeroDivisionError, match="one"):
await x
# Same retries for all
x = c.compute(delayed(varying(args))(), retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
args.append(4)
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
# Per-future retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x, y = [delayed(varying(args))() for args in (xargs, yargs)]
x, y = c.compute([x, y], retries={x: 2})
gc.collect()
assert await x == 30
with pytest.raises(ZeroDivisionError, match="five"):
await y
x, y, z = [delayed(varying(args))() for args in (xargs, yargs, zargs)]
x, y, z = c.compute([x, y, z], retries={(y, z): 2})
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
def test_retries_get(c):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
assert x.compute(retries=5) == 3
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
with pytest.raises(ZeroDivisionError):
x.compute()
@gen_cluster(client=True)
async def test_compute_persisted_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check
x = c.persist(delayed(varying(args))())
fut = c.compute(x)
with pytest.raises(ZeroDivisionError, match="one"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=2)
assert await fut == 3
args.append(4)
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=3)
assert await fut == 3
@gen_cluster(client=True)
async def test_persist_retries(c, s, a, b):
# Same retries for all
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = c.persist(delayed(varying(args))(), retries=1)
x = c.compute(x)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.persist(delayed(varying(args))(), retries=2)
x = c.compute(x)
assert await x == 3
# Per-key retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x, y, z = [delayed(varying(args))() for args in (xargs, yargs, zargs)]
x, y, z = c.persist([x, y, z], retries={(y, z): 2})
x, y, z = c.compute([x, y, z])
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
@gen_cluster(client=True)
async def test_retries_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
assert y == 100
@gen_cluster(client=True)
async def test_future_repr(c, s, a, b):
pd = pytest.importorskip("pandas")
x = c.submit(inc, 10)
y = c.submit(pd.DataFrame, {"x": [1, 2, 3]})
await x
await y
for func in [repr, lambda x: x._repr_html_()]:
assert str(x.key) in func(x)
assert str(x.status) in func(x)
assert str(x.status) in repr(c.futures[x.key])
assert "int" in func(x)
assert "pandas" in func(y)
assert "DataFrame" in func(y)
@gen_cluster(client=True)
async def test_future_tuple_repr(c, s, a, b):
da = pytest.importorskip("dask.array")
y = da.arange(10, chunks=(5,)).persist()
f = futures_of(y)[0]
for func in [repr, lambda x: x._repr_html_()]:
for k in f.key:
assert str(k) in func(f)
@gen_cluster(client=True)
async def test_Future_exception(c, s, a, b):
x = c.submit(div, 1, 0)
result = await x.exception()
assert isinstance(result, ZeroDivisionError)
x = c.submit(div, 1, 1)
result = await x.exception()
assert result is None
def test_Future_exception_sync(c):
x = c.submit(div, 1, 0)
assert isinstance(x.exception(), ZeroDivisionError)
x = c.submit(div, 1, 1)
assert x.exception() is None
@gen_cluster(client=True)
async def test_Future_release(c, s, a, b):
# Released Futures should be removed from the Client in a timely fashion
x = c.submit(div, 1, 1)
await x
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(slowinc, 1, delay=0.5)
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(div, 1, 0)
await x.exception()
x.release()
await asyncio.sleep(0)
assert not c.futures
def test_Future_release_sync(c):
# Released Futures should be removed from the Client in a timely fashion
x = c.submit(div, 1, 1)
x.result()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(slowinc, 1, delay=0.8)
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(div, 1, 0)
x.exception()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
def test_short_tracebacks(loop, c):
tblib = pytest.importorskip("tblib")
future = c.submit(div, 1, 0)
try:
future.result()
except Exception:
_, _, tb = sys.exc_info()
tb = tblib.Traceback(tb).to_dict()
n = 0
while tb is not None:
n += 1
tb = tb["tb_next"]
assert n < 5
@gen_cluster(client=True)
async def test_map_naming(c, s, a, b):
L1 = c.map(inc, range(5))
L2 = c.map(inc, range(5))
assert [x.key for x in L1] == [x.key for x in L2]
L3 = c.map(inc, [1, 1, 1, 1])
assert len({x._state for x in L3}) == 1
L4 = c.map(inc, [1, 1, 1, 1], pure=False)
assert len({x._state for x in L4}) == 4
@gen_cluster(client=True)
async def test_submit_naming(c, s, a, b):
a = c.submit(inc, 1)
b = c.submit(inc, 1)
assert a._state is b._state
c = c.submit(inc, 1, pure=False)
assert c.key != a.key
@gen_cluster(client=True)
async def test_exceptions(c, s, a, b):
x = c.submit(div, 1, 2)
result = await x
assert result == 1 / 2
x = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await x
x = c.submit(div, 10, 2) # continues to operate
result = await x
assert result == 10 / 2
@gen_cluster()
async def test_gc(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
await x
assert s.tasks[x.key].who_has
x.__del__()
await async_wait_for(
lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
)
await c.close()
def test_thread(c):
x = c.submit(inc, 1)
assert x.result() == 2
x = c.submit(slowinc, 1, delay=0.3)
with pytest.raises(TimeoutError):
x.result(timeout="10 ms")
assert x.result() == 2
def test_sync_exceptions(c):
x = c.submit(div, 10, 2)
assert x.result() == 5
y = c.submit(div, 10, 0)
try:
y.result()
assert False
except ZeroDivisionError:
pass
z = c.submit(div, 10, 5)
assert z.result() == 2
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
x = c.submit(inc, 10)
y = c.submit(inc, x)
result = await c.gather(x)
assert result == 11
result = await c.gather([x])
assert result == [11]
result = await c.gather({"x": x, "y": [y]})
assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
async def test_gather_lost(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
await a.close()
with pytest.raises(Exception):
await c.gather([x, y])
def test_gather_sync(c):
x = c.submit(inc, 1)
assert c.gather(x) == 2
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
c.gather([x, y])
[xx] = c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True)
async def test_gather_strict(c, s, a, b):
x = c.submit(div, 2, 1)
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await c.gather([x, y])
[xx] = await c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gather_skip(c, s, a):
x = c.submit(div, 1, 0, priority=10)
y = c.submit(slowinc, 1, delay=0.5)
with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
with captured_logger(logging.getLogger("distributed.client")) as client:
L = await c.gather([x, y], errors="skip")
assert L == [2]
assert not client.getvalue()
assert not sched.getvalue()
@gen_cluster(client=True)
async def test_limit_concurrent_gathering(c, s, a, b):
futures = c.map(inc, range(100))
await c.gather(futures)
assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True, timeout=None)
async def test_get(c, s, a, b):
future = c.get({"x": (inc, 1)}, "x", sync=False)
assert isinstance(future, Future)
result = await future
assert result == 2
futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
assert isinstance(futures[0], Future)
result = await c.gather(futures)
assert result == [2]
futures = c.get({}, [], sync=False)
result = await c.gather(futures)
assert result == []
result = await c.get(
{("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
)
assert result == 3
def test_get_sync(c):
assert c.get({"x": (inc, 1)}, "x") == 2
def test_no_future_references(c):
from weakref import WeakSet
ws = WeakSet()
futures = c.map(inc, range(10))
ws.update(futures)
del futures
import gc
gc.collect()
start = time()
while list(ws):
sleep(0.01)
assert time() < start + 2
def test_get_sync_optimize_graph_passes_through(c):
bag = db.range(10, npartitions=3).map(inc)
dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
async def test_gather_errors(c, s, a, b):
def f(a, b):
raise TypeError
def g(a, b):
raise AttributeError
future_f = c.submit(f, 1, 2)
future_g = c.submit(g, 1, 2)
with pytest.raises(TypeError):
await c.gather(future_f)
with pytest.raises(AttributeError):
await c.gather(future_g)
await a.close()
@gen_cluster(client=True)
async def test_wait(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z])
assert done == {x, y, z}
assert not_done == set()
assert x.status == y.status == "finished"
@gen_cluster(client=True)
async def test_wait_first_completed(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z], return_when="FIRST_COMPLETED")
assert done == {z}
assert not_done == {x, y}
assert z.status == "finished"
assert x.status == "pending"
assert y.status == "pending"
@gen_cluster(client=True, timeout=2)
async def test_wait_timeout(c, s, a, b):
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
await wait(future, timeout=0.01)
def test_wait_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
done, not_done = wait([x, y])
assert done == {x, y}
assert not_done == set()
assert x.status == y.status == "finished"
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
try:
wait(x, y)
except Exception as e:
assert "timeout" in str(e)
assert "list" in str(e)
@gen_cluster(client=True)
async def test_garbage_collection(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
assert c.refcount[x.key] == 2
x.__del__()
await asyncio.sleep(0)
assert c.refcount[x.key] == 1
z = c.submit(inc, y)
y.__del__()
await asyncio.sleep(0)
result = await z
assert result == 3
ykey = y.key
y.__del__()
await asyncio.sleep(0)
assert ykey not in c.futures
@gen_cluster(client=True)
async def test_garbage_collection_with_scatter(c, s, a, b):
[future] = await c.scatter([1])
assert future.key in c.futures
assert future.status == "finished"
assert s.who_wants[future.key] == {c.id}
key = future.key
assert c.refcount[key] == 1
future.__del__()
await asyncio.sleep(0)
assert c.refcount[key] == 0
start = time()
while True:
if key not in s.tasks or not s.tasks[key].who_has:
break
else:
assert time() < start + 3
await asyncio.sleep(0.1)
@gen_cluster(timeout=1000, client=True)
async def test_recompute_released_key(c, s, a, b):
x = c.submit(inc, 100)
result1 = await x
xkey = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert c.refcount[xkey] == 0
# 1 second batching needs a second action to trigger
while (xkey in s.tasks and s.tasks[xkey].who_has) or xkey in a.data or xkey in b.data:
await asyncio.sleep(0.1)
x = c.submit(inc, 100)
assert x.key in c.futures
result2 = await x
assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
async def test_long_tasks_dont_trigger_timeout(c, s, a, b):
from time import sleep
x = c.submit(sleep, 3)
await x
@pytest.mark.skip
@gen_cluster(client=True)
async def test_missing_data_heals(c, s, a, b):
a.validate = False
b.validate = False
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([x, y, z])
# Secretly delete y's key
if y.key in a.data:
del a.data[y.key]
a.release_key(y.key)
if y.key in b.data:
del b.data[y.key]
b.release_key(y.key)
await asyncio.sleep(0)
w = c.submit(add, y, z)
result = await w
assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_missing_data(c, s, a, b):
a.validate = False
b.validate = False
x, y, z = c.map(inc, range(3))
await wait([x, y, z]) # everything computed
for f in [x, y]:
for w in [a, b]:
if f.key in w.data:
del w.data[f.key]
await asyncio.sleep(0)
w.release_key(f.key)
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_nested_missing_data(c, s, a, b):
a.validate = False
b.validate = False
w = c.submit(inc, 1)
x = c.submit(inc, w)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([z])
for worker in [a, b]:
for datum in [y, z]:
if datum.key in worker.data:
del worker.data[datum.key]
await asyncio.sleep(0)
worker.release_key(datum.key)
result = await c.gather([z])
assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
async def test_tokenize_on_futures(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
tok = tokenize(x)
assert tokenize(x) == tokenize(x)
assert tokenize(x) == tokenize(y)
c.futures[x.key].finish()
assert tok == tokenize(y)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_submit(c, s, a, b):
x = c.submit(inc, 1, workers={a.ip})
y = c.submit(inc, x, workers={b.ip})
await wait([x, y])
assert s.host_restrictions[x.key] == {a.ip}
assert x.key in a.data
assert s.host_restrictions[y.key] == {b.ip}
assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port(c, s, a, b):
x = c.submit(inc, 1, workers={a.address})
y = c.submit(inc, x, workers={b.address})
await wait([x, y])
assert s.worker_restrictions[x.key] == {a.address}
assert x.key in a.data
assert s.worker_restrictions[y.key] == {b.address}
assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port_task_key(c, s, a, b):
# Create a long dependency list
tasks = [delayed(inc)(1)]
for _ in range(100):
tasks.append(delayed(add)(tasks[-1], random.choice(tasks)))
last_task = tasks[-1]
# calculate all dependency keys
all_tasks = list(last_task.__dask_graph__())
# only restrict to a single worker
workers = {d: a.address for d in all_tasks}
result = c.compute(last_task, workers=workers)
await result
# all tasks should have been calculated by the first worker
for task in tasks:
assert s.worker_restrictions[task.key] == {a.address}
# and the data should also be there
assert last_task.key in a.data
assert last_task.key not in b.data
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_map(c, s, a, b):
L = c.map(inc, range(5), workers={a.ip})
await wait(L)
assert set(a.data) == {x.key for x in L}
assert not b.data
for x in L:
assert s.host_restrictions[x.key] == {a.ip}
L = c.map(inc, [10, 11, 12], workers=[{a.ip}, {a.ip, b.ip}, {b.ip}])
await wait(L)
assert s.host_restrictions[L[0].key] == {a.ip}
assert s.host_restrictions[L[1].key] == {a.ip, b.ip}
assert s.host_restrictions[L[2].key] == {b.ip}
with pytest.raises(ValueError):
c.map(inc, [10, 11, 12], workers=[{a.ip}])
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_get(c, s, a, b):
dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
restrictions = {"y": {a.ip}, "z": {b.ip}}
futures = c.get(dsk, ["y", "z"], restrictions, sync=False)
result = await c.gather(futures)
assert result == [2, 3]
assert "y" in a.data
assert "z" in b.data
@gen_cluster(client=True)
async def dont_test_bad_restrictions_raise_exception(c, s, a, b):
z = c.submit(inc, 2, workers={"bad-address"})
try:
await z
assert False
except ValueError as e:
assert "bad-address" in str(e)
assert z.key in str(e)
@gen_cluster(client=True, timeout=None)
async def test_remove_worker(c, s, a, b):
L = c.map(inc, range(20))
await wait(L)
await b.close()
assert b.address not in s.workers
result = await c.gather(L)
assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_errors_dont_block(c, s, w):
L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
start = time()
while not (L[0].status == L[2].status == "finished"):
assert time() < start + 5
await asyncio.sleep(0.01)
result = await c.gather([L[0], L[2]])
assert result == [2, 3]
@gen_cluster(client=True)
async def test_submit_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
x = c.submit(assert_list, [1, 2, 3])
result = await x
assert result
x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
result = await x
assert result
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(assert_list, [x, y])
result = await z
assert result
@gen_cluster(client=True)
async def test_map_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
L = c.map(assert_list, [[1, 2, 3], [4]])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
result = await c.gather(L)
assert all(result)
@gen_cluster()
async def test_two_consecutive_clients_share_results(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(random.randint, 0, 1000, pure=True)
xx = await x
f = await Client(s.address, asynchronous=True)
y = f.submit(random.randint, 0, 1000, pure=True)
yy = await y
assert xx == yy
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_submit_then_get_with_Future(c, s, a, b):
x = c.submit(slowinc, 1)
dsk = {"y": (inc, x)}
result = await c.get(dsk, "y", sync=False)
assert result == 3
@gen_cluster(client=True)
async def test_aliases(c, s, a, b):
x = c.submit(inc, 1)
dsk = {"y": x}
result = await c.get(dsk, "y", sync=False)
assert result == 2
@gen_cluster(client=True)
async def test_aliases_2(c, s, a, b):
dsk_keys = [
({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
({"x": "y", "y": 1}, ["x"]),
({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
]
for dsk, keys in dsk_keys:
result = await c.gather(c.get(dsk, keys, sync=False))
assert list(result) == list(dask.get(dsk, keys))
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
d = await c.scatter({"y": 20})
assert isinstance(d["y"], Future)
assert a.data.get("y") == 20 or b.data.get("y") == 20
y_who_has = s.get_who_has(keys=["y"])["y"]
assert a.address in y_who_has or b.address in y_who_has
assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
yy = await c.gather([d["y"]])
assert yy == [20]
[x] = await c.scatter([10])
assert isinstance(x, Future)
assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
xx = await c.gather([x])
x_who_has = s.get_who_has(keys=[x.key])[x.key]
assert s.tasks[x.key].who_has
assert (
s.workers[a.address] in s.tasks[x.key].who_has
or s.workers[b.address] in s.tasks[x.key].who_has
)
assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
assert xx == [10]
z = c.submit(add, x, d["y"]) # submit works on Future
result = await z
assert result == 10 + 20
result = await c.gather([z, x])
assert result == [30, 10]
@gen_cluster(client=True)
async def test_scatter_types(c, s, a, b):
d = await c.scatter({"x": 1})
assert isinstance(d, dict)
assert list(d) == ["x"]
for seq in [[1], (1,), {1}, frozenset([1])]:
L = await c.scatter(seq)
assert isinstance(L, type(seq))
assert len(L) == 1
s.validate_state()
seq = await c.scatter(range(5))
assert isinstance(seq, list)
assert len(seq) == 5
s.validate_state()
@gen_cluster(client=True)
async def test_scatter_non_list(c, s, a, b):
x = await c.scatter(1)
assert isinstance(x, Future)
result = await x
assert result == 1
@gen_cluster(client=True)
async def test_scatter_hash(c, s, a, b):
[a] = await c.scatter([1])
[b] = await c.scatter([1])
assert a.key == b.key
s.validate_state()
@gen_cluster(client=True)
async def test_scatter_tokenize_local(c, s, a, b):
from dask.base import normalize_token
class MyObj:
pass
L = []
@normalize_token.register(MyObj)
def f(x):
L.append(x)
return "x"
obj = MyObj()
future = await c.scatter(obj)
assert L and L[0] is obj
@gen_cluster(client=True)
async def test_scatter_singletons(c, s, a, b):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
future = await c.scatter(x)
result = await future
assert str(result) == str(x)
@gen_cluster(client=True)
async def test_scatter_typename(c, s, a, b):
future = await c.scatter(123)
assert future.key.startswith("int")
@gen_cluster(client=True)
async def test_scatter_hash_2(c, s, a, b):
x = await c.scatter(123)
y = await c.scatter(123)
assert x.key == y.key
z = await c.scatter(123, hash=False)
assert z.key != y.key
@gen_cluster(client=True)
async def test_get_releases_data(c, s, a, b):
await c.gather(c.get({"x": (inc, 1)}, ["x"], sync=False))
import gc
gc.collect()
start = time()
while c.refcount["x"]:
await asyncio.sleep(0.01)
assert time() < start + 2
def test_current(s, a, b):
with Client(s["address"]) as c:
assert Client.current() is c
with pytest.raises(ValueError):
Client.current()
with Client(s["address"]) as c:
assert Client.current() is c
def test_global_clients(loop):
assert _get_global_client() is None
with pytest.raises(ValueError):
default_client()
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert _get_global_client() is c
assert default_client() is c
with Client(s["address"], loop=loop) as f:
assert _get_global_client() is f
assert default_client() is f
assert default_client(c) is c
assert default_client(f) is f
assert _get_global_client() is None
@gen_cluster(client=True)
async def test_exception_on_exception(c, s, a, b):
x = c.submit(lambda: 1 / 0)
y = c.submit(inc, x)
with pytest.raises(ZeroDivisionError):
await y
z = c.submit(inc, y)
with pytest.raises(ZeroDivisionError):
await z
@gen_cluster(client=True)
async def test_get_nbytes(c, s, a, b):
[x] = await c.scatter([1])
assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
y = c.submit(inc, x)
await y
assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_nbytes_determines_worker(c, s, a, b):
x = c.submit(identity, 1, workers=[a.ip])
y = c.submit(identity, tuple(range(100)), workers=[b.ip])
await c.gather([x, y])
z = c.submit(lambda x, y: None, x, y)
await z
assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
async def test_if_intermediates_clear_on_error(c, s, a, b):
x = delayed(div, pure=True)(1, 0)
y = delayed(div, pure=True)(1, 2)
z = delayed(add, pure=True)(x, y)
f = c.compute(z)
with pytest.raises(ZeroDivisionError):
await f
s.validate_state()
assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(
client=True, config={"distributed.scheduler.default-task-durations": {"f": "1ms"}}
)
async def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
np = pytest.importorskip("numpy")
lists = c.map(np.ones, [10000] * 10, pure=False)
sums = c.map(np.sum, lists)
total = c.submit(sum, sums)
def f(x, y):
return None
results = c.map(f, lists, [total] * 10)
await wait([total])
await wait(results)
assert (
sum(
s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
for l, r in zip(lists, results)
)
>= 9
)
@gen_cluster(client=True)
async def test_get_with_non_list_key(c, s, a, b):
dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
x = await c.get(dsk, ("x", 0), sync=False)
y = await c.get(dsk, 5, sync=False)
assert x == 2
assert y == 3
@gen_cluster(client=True)
async def test_get_with_error(c, s, a, b):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
await c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
c.get(dsk, "y")
@gen_cluster(client=True)
async def test_directed_scatter(c, s, a, b):
await c.scatter([1, 2, 3], workers=[a.address])
assert len(a.data) == 3
assert not b.data
await c.scatter([4, 5], workers=[b.name])
assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
futures = c.scatter([1, 2, 3], workers=[b["address"]])
has_what = sync(loop, c.scheduler.has_what)
assert len(has_what[b["address"]]) == len(futures)
assert len(has_what[a["address"]]) == 0
@gen_cluster(client=True)
async def test_scatter_direct(c, s, a, b):
future = await c.scatter(123, direct=True)
assert future.key in a.data or future.key in b.data
assert s.tasks[future.key].who_has
assert future.status == "finished"
result = await future
assert result == 123
assert not s.counters["op"].components[0]["scatter"]
result = await future
assert not s.counters["op"].components[0]["gather"]
result = await c.gather(future)
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_scatter_direct_numpy(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.ones(5)
future = await c.scatter(x, direct=True)
result = await future
assert np.allclose(x, result)
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
async def test_scatter_direct_broadcast(c, s, a, b):
future2 = await c.scatter(456, direct=True, broadcast=True)
assert future2.key in a.data
assert future2.key in b.data
assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
result = await future2
assert result == 456
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_balanced(c, s, *workers):
futures = await c.scatter([1, 2, 3], direct=True)
assert sorted([len(w.data) for w in workers]) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_broadcast_target(c, s, *workers):
futures = await c.scatter([123, 456], direct=True, workers=workers[0].address)
assert futures[0].key in workers[0].data
assert futures[1].key in workers[0].data
futures = await c.scatter(
[123, 456],
direct=True,
broadcast=True,
workers=[w.address for w in workers[:3]],
)
assert all(
f.key in w.data and s.workers[w.address] in s.tasks[f.key].who_has
for f in futures
for w in workers[:3]
)
@gen_cluster(client=True, nthreads=[])
async def test_scatter_direct_empty(c, s):
with pytest.raises((ValueError, TimeoutError)):
await c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, timeout=None, nthreads=[("127.0.0.1", 1)] * 5)
async def test_scatter_direct_spread_evenly(c, s, *workers):
futures = []
for i in range(10):
future = await c.scatter(i, direct=True)
futures.append(future)
assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
results = c.gather(futures, direct=direct)
assert results == [1, 2, 3]
delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
async def test_gather_direct(c, s, a, b):
futures = await c.scatter([1, 2, 3])
data = await c.gather(futures, direct=True)
assert data == [1, 2, 3]
@gen_cluster(client=True)
async def test_many_submits_spread_evenly(c, s, a, b):
L = [c.submit(inc, i) for i in range(10)]
await wait(L)
assert a.data and b.data
@gen_cluster(client=True)
async def test_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
tb = await x.traceback()
assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
async def test_get_traceback(c, s, a, b):
try:
await c.get({"x": (div, 1, 0)}, "x", sync=False)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
@gen_cluster(client=True)
async def test_gather_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await c.gather(x)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
def test_traceback_sync(c):
x = c.submit(div, 1, 0)
tb = x.traceback()
assert any(
"x / y" in line
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
y = c.submit(inc, x)
tb2 = y.traceback()
assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
set(pluck(3, traceback.extract_tb(tb)))
)
z = c.submit(div, 1, 2)
tb = z.traceback()
assert tb is None
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", "def f():\n return {}".format(value)) as fn:
await c.upload_file(fn)
x = c.submit(g, pure=False)
result = await x
assert result == value
@gen_cluster(client=True)
async def test_upload_file_no_extension(c, s, a, b):
with tmp_text("myfile", "") as fn:
await c.upload_file(fn)
@gen_cluster(client=True)
async def test_upload_file_zip(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
try:
for value in [123, 456]:
with tmp_text(
"myfile.py", "def f():\n return {}".format(value)
) as fn_my_file:
with zipfile.ZipFile("myfile.zip", "w") as z:
z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
await c.upload_file("myfile.zip")
x = c.submit(g, pure=False)
result = await x
assert result == value
finally:
if os.path.exists("myfile.zip"):
os.remove("myfile.zip")
@gen_cluster(client=True)
async def test_upload_file_egg(c, s, a, b):
def g():
import package_1, package_2
return package_1.a, package_2.b
# c.upload_file tells each worker to
# - put this file in their local_directory
# - modify their sys.path to include it
# we don't care about the local_directory
# but we do care about restoring the path
with save_sys_modules():
for value in [123, 456]:
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "setup.py"), "w") as f:
f.write("from setuptools import setup, find_packages\n")
f.write(
'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
value
)
)
# test a package with an underscore in the name
package_1 = os.path.join(dirname, "package_1")
os.mkdir(package_1)
with open(os.path.join(package_1, "__init__.py"), "w") as f:
f.write("a = {}\n".format(value))
# test multiple top-level packages
package_2 = os.path.join(dirname, "package_2")
os.mkdir(package_2)
with open(os.path.join(package_2, "__init__.py"), "w") as f:
f.write("b = {}\n".format(value))
# compile these into an egg
subprocess.check_call(
[sys.executable, "setup.py", "bdist_egg"], cwd=dirname
)
egg_root = os.path.join(dirname, "dist")
# first file ending with '.egg'
egg_name = [
fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
][0]
egg_path = os.path.join(egg_root, egg_name)
await c.upload_file(egg_path)
os.remove(egg_path)
x = c.submit(g, pure=False)
result = await x
assert result == (value, value)
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
assert a.local_directory
assert b.local_directory
with tmp_text("myfile", "abc") as fn:
with tmp_text("myfile2", "def") as fn2:
await c._upload_large_file(fn, remote_filename="x")
await c._upload_large_file(fn2)
for w in [a, b]:
assert os.path.exists(os.path.join(w.local_directory, "x"))
assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
with open(os.path.join(w.local_directory, "x")) as f:
assert f.read() == "abc"
with open(os.path.join(w.local_directory, "myfile2")) as f:
assert f.read() == "def"
def test_upload_file_sync(c):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
c.upload_file(fn)
x = c.submit(g)
assert x.result() == 123
@gen_cluster(client=True)
async def test_upload_file_exception(c, s, a, b):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
await c.upload_file(fn)
def test_upload_file_exception_sync(c):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
c.upload_file(fn)
@pytest.mark.skip
@gen_cluster()
async def test_multiple_clients(s, a, b):
a = await Client(s.address, asynchronous=True)
b = await Client(s.address, asynchronous=True)
x = a.submit(inc, 1)
y = b.submit(inc, 2)
assert x.client is a
assert y.client is b
xx = await x
yy = await y
assert xx == 2
assert yy == 3
z = a.submit(add, x, y)
assert z.client is a
zz = await z
assert zz == 5
await a.close()
await b.close()
@gen_cluster(client=True)
async def test_async_compute(c, s, a, b):
from dask.delayed import delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
[yy, zz, aa] = c.compute([y, z, 3], sync=False)
assert isinstance(yy, Future)
assert isinstance(zz, Future)
assert aa == 3
result = await c.gather([yy, zz])
assert result == [2, 0]
assert isinstance(c.compute(y), Future)
assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
async def test_async_compute_with_scatter(c, s, a, b):
d = await c.scatter({("x", 1): 1, ("y", 1): 2})
x, y = d[("x", 1)], d[("y", 1)]
from dask.delayed import delayed
z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
zz = c.compute(z)
[result] = await c.gather([zz])
assert result == 2 + 3
def test_sync_compute(c):
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
yy, zz = c.compute([y, z], sync=True)
assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
async def test_remote_scatter_gather(c, s, a, b):
x, y, z = await c.scatter([1, 2, 3])
assert x.key in a.data or x.key in b.data
assert y.key in a.data or y.key in b.data
assert z.key in a.data or z.key in b.data
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(timeout=1000, client=True)
async def test_remote_submit_on_Future(c, s, a, b):
x = c.submit(lambda x: x + 1, 1)
y = c.submit(lambda x: x + 1, x)
result = await y
assert result == 3
def test_start_is_idempotent(c):
c.start()
c.start()
c.start()
x = c.submit(inc, 1)
assert x.result() == 2
@gen_cluster(client=True)
async def test_client_with_scheduler(c, s, a, b):
assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y)
result = await x
assert result == 1 + 1
result = await z
assert result == 1 + 1 + 1 + 2
A, B, C = await c.scatter([1, 2, 3])
AA, BB, xx = await c.gather([A, B, x])
assert (AA, BB, xx) == (1, 2, 2)
result = await c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
assert result == 12
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_allow_restrictions(c, s, a, b):
aws = s.workers[a.address]
bws = s.workers[b.address]
x = c.submit(inc, 1, workers=a.ip)
await x
assert s.tasks[x.key].who_has == {aws}
assert not s.loose_restrictions
x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
await x
assert s.tasks[x.key].who_has == {aws}
assert x.key in s.loose_restrictions
L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has == {aws} for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
await x
assert s.tasks[x.key].who_has
assert x.key in s.loose_restrictions
L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
with pytest.raises(ValueError):
c.submit(inc, 1, allow_other_workers=True)
with pytest.raises(ValueError):
c.map(inc, [1], allow_other_workers=True)
with pytest.raises(TypeError):
c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
with pytest.raises(TypeError):
c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
@pytest.mark.skipif("True", reason="because")
def test_bad_address():
try:
Client("123.123.123.123:1234", timeout=0.1)
except (IOError, TimeoutError) as e:
assert "connect" in str(e).lower()
try:
Client("127.0.0.1:1234", timeout=0.1)
except (IOError, TimeoutError) as e:
assert "connect" in str(e).lower()
def test_informative_error_on_cluster_type():
with pytest.raises(TypeError) as exc_info:
Client(LocalCluster)
assert "Scheduler address must be a string or a Cluster instance" in str(
exc_info.value
)
@gen_cluster(client=True)
async def test_long_error(c, s, a, b):
def bad(x):
raise ValueError("a" * 100000)
x = c.submit(bad, 10)
try:
await x
except ValueError as e:
assert len(str(e)) < 100000
tb = await x.traceback()
assert all(
len(line) < 100000
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
@gen_cluster(client=True)
async def test_map_on_futures_with_kwargs(c, s, a, b):
def f(x, y=10):
return x + y
futures = c.map(inc, range(10))
futures2 = c.map(f, futures, y=20)
results = await c.gather(futures2)
assert results == [i + 1 + 20 for i in range(10)]
future = c.submit(inc, 100)
future2 = c.submit(f, future, y=200)
result = await future2
assert result == 100 + 1 + 200
class BadlySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise TypeError("hello!")
class FatallySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
print("This should never have been deserialized, closing")
import sys
sys.exit(0)
@gen_cluster(client=True)
async def test_badly_serialized_input(c, s, a, b):
o = BadlySerializedObject()
future = c.submit(inc, o)
futures = c.map(inc, range(10))
L = await c.gather(futures)
assert list(L) == list(map(inc, range(10)))
assert future.status == "error"
@pytest.mark.skipif("True", reason="")
async def test_badly_serialized_input_stderr(capsys, c):
o = BadlySerializedObject()
future = c.submit(inc, o)
start = time()
while True:
sleep(0.01)
out, err = capsys.readouterr()
if "hello!" in err:
break
assert time() - start < 20
assert future.status == "error"
def test_repr(loop):
funcs = [str, repr, lambda x: x._repr_html_()]
with cluster(nworkers=3, worker_kwargs={"memory_limit": "2 GB"}) as (s, [a, b, c]):
with Client(s["address"], loop=loop) as c:
for func in funcs:
text = func(c)
assert c.scheduler.address in text
assert "3" in text
assert "6" in text
assert "GB" in text
if "<table" not in text:
assert len(text) < 80
for func in funcs:
text = func(c)
assert "not connected" in text
@gen_cluster(client=True)
async def test_repr_async(c, s, a, b):
c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
async def test_repr_no_memory_limit(c, s, a, b):
c._repr_html_()
@gen_test()
async def test_repr_localcluster():
cluster = await LocalCluster(
processes=False, dashboard_address=None, asynchronous=True
)
client = await Client(cluster, asynchronous=True)
try:
text = client._repr_html_()
assert cluster.scheduler.address in text
assert is_valid_xml(client._repr_html_())
finally:
await client.close()
await cluster.close()
@gen_cluster(client=True)
async def test_forget_simple(c, s, a, b):
x = c.submit(inc, 1, retries=2)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
assert set(s.tasks) == {x.key, y.key, z.key}
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.tasks
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key not in s.tasks
assert z.key not in s.tasks
assert not s.tasks[y.key].dependents
s.client_releases_keys(keys=[y.key], client=c.id)
assert not s.tasks
@gen_cluster(client=True)
async def test_forget_complex(e, s, A, B):
a, b, c, d = await e.scatter(list(range(4)))
ab = e.submit(add, a, b)
cd = e.submit(add, c, d)
ac = e.submit(add, a, c)
acab = e.submit(add, ac, ab)
await wait([a, b, c, d, ab, ac, cd, acab])
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[ab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[b.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}
s.client_releases_keys(keys=[acab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
assert b.key not in s.tasks
start = time()
while b.key in A.data or b.key in B.data:
await asyncio.sleep(0.01)
assert time() < start + 10
s.client_releases_keys(keys=[ac.key], client=e.id)
assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
async def test_forget_in_flight(e, s, A, B):
delayed2 = partial(delayed, pure=True)
a, b, c, d = [delayed2(slowinc)(i) for i in range(4)]
ab = delayed2(slowadd)(a, b, dask_key_name="ab")
cd = delayed2(slowadd)(c, d, dask_key_name="cd")
ac = delayed2(slowadd)(a, c, dask_key_name="ac")
acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")
x, y = e.compute([ac, acab])
s.validate_state()
for i in range(5):
await asyncio.sleep(0.01)
s.validate_state()
s.client_releases_keys(keys=[y.key], client=e.id)
s.validate_state()
for k in [acab.key, ab.key, b.key]:
assert k not in s.tasks
@gen_cluster(client=True)
async def test_forget_errors(c, s, a, b):
x = c.submit(div, 1, 0)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([y])
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key in s.exceptions_blame
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[y.key], client=c.id)
assert x.key not in s.exceptions
assert x.key not in s.exceptions_blame
assert y.key not in s.exceptions_blame
assert z.key not in s.exceptions_blame
def test_repr_sync(c):
s = str(c)
r = repr(c)
assert c.scheduler.address in s
assert c.scheduler.address in r
assert str(2) in s # nworkers
assert "cores" in s or "threads" in s
@gen_cluster(client=True)
async def test_waiting_data(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
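# Two clients submitting the same work share a single task: the scheduler
# tracks which clients want which keys (wants_what / who_wants) and only
# forgets a key when no client wants it any more.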
@gen_cluster()
async def test_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
assert set(s.client_comms) == {c.id, f.id}
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
assert s.wants_what == {
c.id: {x.key, y.key},
f.id: {y.key},
"fire-and-forget": set(),
}
assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}
await c.close()
start = time()
while c.id in s.wants_what:
await asyncio.sleep(0.01)
assert time() < start + 5
assert c.id not in s.wants_what
assert c.id not in s.who_wants[y.key]
assert x.key not in s.who_wants
await f.close()
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 2, s.tasks
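# Helper for the test below: runs in a separate process, submits one task,
# then sleeps so the parent test can kill the process mid-connection.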
def long_running_client_connection(address):
with pristine_loop():
c = Client(address)
x = c.submit(lambda x: x + 1, 10)
x.result()
sleep(100)
@gen_cluster()
async def test_cleanup_after_broken_client_connection(s, a, b):
proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
proc.daemon = True
proc.start()
start = time()
while not s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
proc.terminate()
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
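# Dropping a future on one client must not release data that another client
# still wants; the key is only cleared once every interested client lets go.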
@gen_cluster()
async def test_multi_garbage_collection(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
x.__del__()
start = time()
while x.key in a.data or x.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id, f.id}}
y.__del__()
start = time()
while x.key in s.wants_what[f.id]:
await asyncio.sleep(0.01)
assert time() < start + 5
await asyncio.sleep(0.1)
assert y.key in a.data or y.key in b.data
assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id}}
y2.__del__()
start = time()
while y.key in a.data or y.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert not any(v for v in s.wants_what.values())
assert not s.who_wants
await c.close()
await f.close()
@gen_cluster(client=True)
async def test__broadcast(c, s, a, b):
x, y = await c.scatter([1, 2], broadcast=True)
assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test__broadcast_integer(c, s, *workers):
x, y = await c.scatter([1, 2], broadcast=2)
assert len(s.tasks[x.key].who_has) == 2
assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
async def test__broadcast_dict(c, s, a, b):
d = await c.scatter({"x": 1}, broadcast=True)
assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
x, y = c.scatter([1, 2], broadcast=True)
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key},
b["address"]: {x.key, y.key},
}
[z] = c.scatter([3], broadcast=True, workers=[a["address"]])
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key, z.key},
b["address"]: {x.key, y.key},
}
@gen_cluster(client=True)
async def test_proxy(c, s, a, b):
msg = await c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
async def test__cancel(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, x)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
await c.cancel([x])
assert x.cancelled()
assert "cancel" in str(x)
s.validate_state()
start = time()
while not y.cancelled():
await asyncio.sleep(0.01)
assert time() < start + 5
assert not s.tasks
s.validate_state()
@gen_cluster(client=True)
async def test_cancel_tuple_key(c, s, a, b):
x = c.submit(inc, 1, key=("x", 0, 1))
await x
await c.cancel(x)
with pytest.raises(CancelledError):
await x
@gen_cluster()
async def test_cancel_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(slowinc, 1)
y = f.submit(slowinc, 1)
assert x.key == y.key
await c.cancel([x])
assert x.cancelled()
assert not y.cancelled()
start = time()
while y.key not in s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
out = await y
assert out == 2
with pytest.raises(CancelledError):
await x
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_cancel_collection(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await c.cancel(x)
await c.cancel([x])
assert all(f.cancelled() for f in L)
start = time()
while s.tasks:
assert time() < start + 1
await asyncio.sleep(0.01)
def test_cancel(c):
x = c.submit(slowinc, 1, key="x")
y = c.submit(slowinc, x, key="y")
z = c.submit(slowinc, y, key="z")
c.cancel([y])
start = time()
while not z.cancelled():
sleep(0.01)
assert time() < start + 5
assert x.result() == 2
z.cancel()
assert z.cancelled()
@gen_cluster(client=True)
async def test_future_type(c, s, a, b):
x = c.submit(inc, 1)
await wait([x])
assert x.type == int
assert "int" in str(x)
@gen_cluster(client=True)
async def test_traceback_clean(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await x
except Exception as e:
f = e
exc_type, exc_value, tb = sys.exc_info()
while tb:
assert "scheduler" not in tb.tb_frame.f_code.co_filename
assert "worker" not in tb.tb_frame.f_code.co_filename
tb = tb.tb_next
@gen_cluster(client=True)
async def test_map_different_lengths(c, s, a, b):
assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert dask.base.get_scheduler() == c.get
out, err = capsys.readouterr()
assert len(out.strip().split("\n")) == 1
assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
async def test_async_persist(c, s, a, b):
from dask.delayed import delayed, Delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
w = delayed(add)(y, z)
yy, ww = c.persist([y, w])
assert type(yy) == type(y)
assert type(ww) == type(w)
assert len(yy.dask) == 1
assert len(ww.dask) == 1
assert len(w.dask) > 1
assert y.__dask_keys__() == yy.__dask_keys__()
assert w.__dask_keys__() == ww.__dask_keys__()
while y.key not in s.tasks and w.key not in s.tasks:
await asyncio.sleep(0.01)
assert s.who_wants[y.key] == {c.id}
assert s.who_wants[w.key] == {c.id}
yyf, wwf = c.compute([yy, ww])
yyy, www = await c.gather([yyf, wwf])
assert yyy == inc(1)
assert www == add(inc(1), dec(1))
assert isinstance(c.persist(y), Delayed)
assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
async def test__persist(c, s, a, b):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
g, h = c.compute([y, yy])
gg, hh = await c.gather([g, h])
assert (gg == hh).all()
def test_persist(c):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
zz = yy.compute()
z = y.compute()
assert (zz == z).all()
@gen_cluster(timeout=60, client=True)
async def test_long_traceback(c, s, a, b):
from distributed.protocol.pickle import dumps
def deep(n):
if n == 0:
1 / 0
else:
return deep(n - 1)
x = c.submit(deep, 200)
await wait([x])
assert len(dumps(c.futures[x.key].traceback)) < 10000
assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
async def test_wait_on_collections(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await wait(x)
assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
async def test_futures_of_get(c, s, a, b):
x, y, z = c.map(inc, [1, 2, 3])
assert set(futures_of(0)) == set()
assert set(futures_of(x)) == {x}
assert set(futures_of([x, y, z])) == {x, y, z}
assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
assert set(futures_of({"x": x, "y": [y]})) == {x, y}
b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
assert set(futures_of(b)) == {x, y, z}
sg = SubgraphCallable(
{"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
"out",
("in",),
)
assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
da = pytest.importorskip("dask.array")
assert futures_of([da.Array]) == []
@gen_cluster(client=True)
async def test_futures_of_cancelled_raises(c, s, a, b):
x = c.submit(inc, 1)
await c.cancel([x])
with pytest.raises(CancelledError):
await x
with pytest.raises(CancelledError):
await c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)
with pytest.raises(CancelledError):
c.submit(inc, x)
with pytest.raises(CancelledError):
c.submit(add, 1, y=x)
with pytest.raises(CancelledError):
c.map(add, [1], y=x)
assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dont_delete_recomputed_results(c, s, w):
x = c.submit(inc, 1) # compute first time
await wait([x])
x.__del__() # trigger garbage collection
await asyncio.sleep(0)
xx = c.submit(inc, 1) # compute second time
start = time()
while xx.key not in w.data: # data shows up
await asyncio.sleep(0.01)
assert time() < start + 1
while time() < start + (s.delete_interval + 100) / 1000: # and stays
assert xx.key in w.data
await asyncio.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
async def test_fatally_serialized_input(c, s):
o = FatallySerializedObject()
future = c.submit(inc, o)
while not s.tasks:
await asyncio.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
async def test_balance_tasks_by_stacks(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
y = c.submit(inc, 2)
await wait(y)
assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
async def test_run(c, s, a, b):
results = await c.run(inc, 1)
assert results == {a.address: 2, b.address: 2}
results = await c.run(inc, 1, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(inc, 1, workers=[])
assert results == {}
@gen_cluster(client=True)
async def test_run_handles_picklable_data(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
def func():
return {}, set(), [], (), 1, "hello", b"100"
results = await c.run_on_scheduler(func)
assert results == func()
results = await c.run(func)
assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
def func(x, y=10):
return x + y
result = c.run(func, 1, y=2)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(func, 1, y=2, workers=[a["address"]])
assert result == {a["address"]: 3}
@gen_cluster(client=True)
async def test_run_coroutine(c, s, a, b):
results = await c.run(geninc, 1, delay=0.05)
assert results == {a.address: 2, b.address: 2}
results = await c.run(geninc, 1, delay=0.05, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(geninc, 1, workers=[])
assert results == {}
with pytest.raises(RuntimeError, match="hello"):
await c.run(throws, 1)
results = await c.run(asyncinc, 2, delay=0.01)
assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
result = c.run(geninc, 2, delay=0.01)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(geninc, 2, workers=[a["address"]])
assert result == {a["address"]: 3}
t1 = time()
result = c.run(geninc, 2, delay=10, wait=False)
t2 = time()
assert result is None
assert t2 - t1 <= 1.0
def test_run_exception(c):
def raise_exception(exc_type, exc_msg):
raise exc_type(exc_msg)
for exc_type in [ValueError, RuntimeError]:
with pytest.raises(exc_type, match="informative message"):
c.run(raise_exception, exc_type, "informative message")
def test_diagnostic_ui(loop):
with cluster() as (s, [a, b]):
a_addr = a["address"]
b_addr = b["address"]
with Client(s["address"], loop=loop) as c:
d = c.nthreads()
assert d == {a_addr: 1, b_addr: 1}
d = c.nthreads([a_addr])
assert d == {a_addr: 1}
d = c.nthreads(a_addr)
assert d == {a_addr: 1}
d = c.nthreads(a["address"])
assert d == {a_addr: 1}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(inc, 3)
wait([x, y, z])
d = c.who_has()
assert set(d) == {x.key, y.key, z.key}
assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
assert all(d.values())
d = c.who_has([x, y])
assert set(d) == {x.key, y.key}
d = c.who_has(x)
assert set(d) == {x.key}
d = c.has_what()
assert set(d) == {a_addr, b_addr}
assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
d = c.has_what([a_addr])
assert set(d) == {a_addr}
d = c.has_what(a_addr)
assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
wait(incs + doubles)
assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
async def test_diagnostic_nbytes(c, s, a, b):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
await wait(incs + doubles)
assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_test()
async def test_worker_aliases():
s = await Scheduler(validate=True, port=0)
a = Worker(s.address, name="alice")
b = Worker(s.address, name="bob")
w = Worker(s.address, name=3)
await asyncio.gather(a, b, w)
c = await Client(s.address, asynchronous=True)
L = c.map(inc, range(10), workers="alice")
future = await c.scatter(123, workers=3)
await wait(L)
assert len(a.data) == 10
assert len(b.data) == 0
assert dict(w.data) == {future.key: 123}
for i, alias in enumerate([3, [3], "alice"]):
result = await c.submit(lambda x: x + 1, i, workers=alias)
assert result == i + 1
await c.close()
await asyncio.gather(a.close(), b.close(), w.close())
await s.close()
def test_persist_get_sync(c):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
async def test_persist_get(c, s, a, b):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
await asyncio.sleep(0.5)
result = await c.gather(c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False))
assert result[0] == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
psutil = pytest.importorskip("psutil")
with cluster() as (s, [a, b]):
proc = psutil.Process()
with Client(s["address"], loop=loop) as c: # first client to start loop
before = proc.num_fds() # measure
for i in range(4):
with Client(s["address"], loop=loop): # start more clients
pass
start = time()
while proc.num_fds() > before:
sleep(0.01)
assert time() < start + 4
@gen_cluster()
async def test_startup_close_startup(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c = await Client(s.address, asynchronous=True)
await c.close()
def test_startup_close_startup_sync(loop):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
sleep(0.1)
with Client(s["address"]) as c:
pass
with Client(s["address"]) as c:
pass
sleep(0.1)
with Client(s["address"]) as c:
pass
@gen_cluster(client=True)
async def test_badly_serialized_exceptions(c, s, a, b):
def f():
class BadlySerializedException(Exception):
def __reduce__(self):
raise TypeError()
raise BadlySerializedException("hello world")
x = c.submit(f)
try:
result = await x
except Exception as e:
assert "hello world" in str(e)
else:
assert False
@gen_cluster(client=True)
async def test_rebalance(c, s, a, b):
aws = s.workers[a.address]
bws = s.workers[b.address]
x, y = await c.scatter([1, 2], workers=[a.address])
assert len(a.data) == 2
assert len(b.data) == 0
s.validate_state()
await c.rebalance()
s.validate_state()
assert len(b.data) == 1
assert {ts.key for ts in bws.has_what} == set(b.data)
assert bws in s.tasks[x.key].who_has or bws in s.tasks[y.key].who_has
assert len(a.data) == 1
assert {ts.key for ts in aws.has_what} == set(a.data)
assert aws not in s.tasks[x.key].who_has or aws not in s.tasks[y.key].who_has
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 4, client=True)
async def test_rebalance_workers(e, s, a, b, c, d):
w, x, y, z = await e.scatter([1, 2, 3, 4], workers=[a.address])
assert len(a.data) == 4
assert len(b.data) == 0
assert len(c.data) == 0
assert len(d.data) == 0
await e.rebalance([x, y], workers=[a.address, c.address])
assert len(a.data) == 3
assert len(b.data) == 0
assert len(c.data) == 1
assert len(d.data) == 0
assert c.data == {x.key: 2} or c.data == {y.key: 3}
await e.rebalance()
assert len(a.data) == 1
assert len(b.data) == 1
assert len(c.data) == 1
assert len(d.data) == 1
s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_execution(c, s, a, b):
futures = c.map(inc, range(10), workers=a.address)
await c.rebalance(futures)
assert len(a.data) == len(b.data) == 5
s.validate_state()
def test_rebalance_sync(c, s, a, b):
futures = c.map(inc, range(10), workers=[a["address"]])
c.rebalance(futures)
has_what = c.has_what()
assert len(has_what) == 2
assert list(valmap(len, has_what).values()) == [5, 5]
@gen_cluster(client=True)
async def test_rebalance_unprepared(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await asyncio.sleep(0.1)
await c.rebalance(futures)
s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_raises_missing_data(c, s, a, b):
with pytest.raises(ValueError, match="keys were found to be missing"):
futures = await c.scatter(range(100))
keys = [f.key for f in futures]
del futures
await c.rebalance(keys)
@gen_cluster(client=True)
async def test_receive_lost_key(c, s, a, b):
x = c.submit(inc, 1, workers=[a.address])
await x
await a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
await asyncio.sleep(0.01)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_unrunnable_task_runs(c, s, a, b):
x = c.submit(inc, 1, workers=[a.ip])
await x
await a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
await asyncio.sleep(0.01)
assert s.tasks[x.key] in s.unrunnable
assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}
w = await Worker(s.address, loop=s.loop)
start = time()
while x.status != "finished":
assert time() < start + 2
await asyncio.sleep(0.01)
assert s.tasks[x.key] not in s.unrunnable
result = await x
assert result == 2
await w.close()
@gen_cluster(client=True, nthreads=[])
async def test_add_worker_after_tasks(c, s):
futures = c.map(inc, range(10))
n = await Nanny(s.address, nthreads=2, loop=s.loop, port=0)
await c.gather(futures)
await n.close()
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_workers_register_indirect_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, x, workers=b.ip)
await y
assert b.data[x.key] == 1
assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
s.validate_state()
@gen_cluster(client=True)
async def test_submit_on_cancelled_future(c, s, a, b):
x = c.submit(inc, 1)
await x
await c.cancel(x)
with pytest.raises(CancelledError):
c.submit(inc, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate(c, s, *workers):
[a, b] = await c.scatter([1, 2])
await s.replicate(keys=[a.key, b.key], n=5)
s.validate_state()
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers) == 5
assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
async def test_replicate_tuple_keys(c, s, a, b):
x = delayed(inc)(1, dask_key_name=("x", 1))
f = c.persist(x)
await c.replicate(f, n=5)
s.validate_state()
assert a.data and b.data
await c.rebalance(f)
s.validate_state()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_workers(c, s, *workers):
[a, b] = await c.scatter([1, 2], workers=[workers[0].address])
await s.replicate(
keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
)
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers[:5]) == 5
assert sum(b.key in w.data for w in workers[:5]) == 5
assert sum(a.key in w.data for w in workers[5:]) == 0
assert sum(b.key in w.data for w in workers[5:]) == 0
await s.replicate(keys=[a.key, b.key], n=1)
assert len(s.tasks[a.key].who_has) == 1
assert len(s.tasks[b.key].who_has) == 1
assert sum(a.key in w.data for w in workers) == 1
assert sum(b.key in w.data for w in workers) == 1
s.validate_state()
await s.replicate(keys=[a.key, b.key], n=None) # all
assert len(s.tasks[a.key].who_has) == 10
assert len(s.tasks[b.key].who_has) == 10
s.validate_state()
await s.replicate(
keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
)
assert sum(a.key in w.data for w in workers[:5]) == 1
assert sum(b.key in w.data for w in workers[:5]) == 1
assert sum(a.key in w.data for w in workers[5:]) == 5
assert sum(b.key in w.data for w in workers[5:]) == 5
s.validate_state()
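# Tracks how many times an object has been round-tripped through pickle;
# used below to check that replication copies data between workers
# (tree-branching) instead of sending every copy from the original holder.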
class CountSerialization:
def __init__(self):
self.n = 0
def __setstate__(self, n):
self.n = n + 1
def __getstate__(self):
return self.n
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_tree_branching(c, s, *workers):
obj = CountSerialization()
[future] = await c.scatter([obj])
await s.replicate(keys=[future.key], n=10)
max_count = max(w.data[future.key].n for w in workers)
assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_client_replicate(c, s, *workers):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
await c.replicate([x, y], n=5)
assert len(s.tasks[x.key].who_has) == 5
assert len(s.tasks[y.key].who_has) == 5
await c.replicate([x, y], n=3)
assert len(s.tasks[x.key].who_has) == 3
assert len(s.tasks[y.key].who_has) == 3
await c.replicate([x, y])
s.validate_state()
assert len(s.tasks[x.key].who_has) == 10
assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
timeout=None,
)
async def test_client_replicate_host(client, s, a, b, c):
aws = s.workers[a.address]
bws = s.workers[b.address]
cws = s.workers[c.address]
x = client.submit(inc, 1, workers="127.0.0.2")
await wait([x])
assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}
await client.replicate([x], workers=["127.0.0.2"])
assert s.tasks[x.key].who_has == {bws, cws}
await client.replicate([x], workers=["127.0.0.1"])
assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
c.replicate([x, y], n=2)
who_has = c.who_has()
assert len(who_has[x.key]) == len(who_has[y.key]) == 2
with pytest.raises(ValueError):
c.replicate([x], n=0)
assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
async def test_task_load_adapts_quickly(c, s, a):
future = c.submit(slowinc, 1, delay=0.2) # slow
await wait(future)
assert 0.15 < s.task_prefixes["slowinc"].duration_average < 0.4
futures = c.map(slowinc, range(10), delay=0) # very fast
await wait(futures)
assert 0 < s.task_prefixes["slowinc"].duration_average < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_after_fast_functions(c, s, a, b):
x = c.submit(inc, 1, workers=a.address) # very fast
y = c.submit(inc, 2, workers=b.address) # very fast
await wait([x, y])
futures = c.map(inc, range(2, 11))
await wait(futures)
assert any(f.key in a.data for f in futures)
assert any(f.key in b.data for f in futures)
# assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_on_startup(c, s, a, b):
x, y = c.map(inc, [1, 2])
await wait([x, y])
assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_contiguous_load(c, s, a, b):
w, x, y, z = c.map(inc, [1, 2, 3, 4])
await wait([w, x, y, z])
groups = [set(a.data), set(b.data)]
assert {w.key, x.key} in groups
assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit(c, s, *workers):
L = [c.submit(slowinc, i) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit_and_resident_data(c, s, *workers):
[x] = await c.scatter([10], broadcast=True)
L = [c.submit(slowinc, x, pure=False) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(slowinc, range(100), delay=delay)
futures = c.map(slowinc, futures, delay=delay / 10)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores_random(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(randominc, range(100), scale=0.1)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_cancel_clears_processing(c, s, *workers):
da = pytest.importorskip("dask.array")
x = c.submit(slowinc, 1, delay=0.2)
while not s.tasks:
await asyncio.sleep(0.01)
await c.cancel(x)
start = time()
while any(v for w in s.workers.values() for v in w.processing):
assert time() < start + 0.2
await asyncio.sleep(0.01)
s.validate_state()
def test_default_get():
with cluster() as (s, [a, b]):
pre_get = dask.base.get_scheduler()
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"], set_as_default=True) as c:
assert dask.base.get_scheduler() == c.get
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c = Client(s["address"], set_as_default=False)
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c.close()
c = Client(s["address"], set_as_default=True)
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == c.get
c.close()
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"]) as c:
assert dask.base.get_scheduler() == c.get
with Client(s["address"], set_as_default=False) as c:
assert dask.base.get_scheduler() != c.get
assert dask.base.get_scheduler() != c.get
with Client(s["address"], set_as_default=True) as c1:
assert dask.base.get_scheduler() == c1.get
with Client(s["address"], set_as_default=True) as c2:
assert dask.base.get_scheduler() == c2.get
assert dask.base.get_scheduler() == c1.get
assert dask.base.get_scheduler() == pre_get
@gen_cluster(client=True)
async def test_get_processing(c, s, a, b):
processing = await c.processing()
assert processing == valmap(tuple, s.processing)
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
)
await asyncio.sleep(0.2)
x = await c.processing()
assert set(x) == {a.address, b.address}
x = await c.processing(workers=[a.address])
assert isinstance(x[a.address], (list, tuple))
@gen_cluster(client=True)
async def test_get_foo(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
x = await c.scheduler.ncores()
assert x == s.nthreads
x = await c.scheduler.ncores(workers=[a.address])
assert x == {a.address: s.nthreads[a.address]}
x = await c.scheduler.has_what()
assert valmap(sorted, x) == valmap(sorted, s.has_what)
x = await c.scheduler.has_what(workers=[a.address])
assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}
x = await c.scheduler.nbytes(summary=False)
assert x == s.get_nbytes(summary=False)
x = await c.scheduler.nbytes(keys=[futures[0].key], summary=False)
assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}
x = await c.scheduler.who_has()
assert valmap(sorted, x) == valmap(sorted, s.who_has)
x = await c.scheduler.who_has(keys=[futures[0].key])
assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
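# Helper: assert two mappings have the same keys and element-wise equal
# values, regardless of the concrete sequence type used for the values.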
def assert_dict_key_equal(expected, actual):
assert set(expected.keys()) == set(actual.keys())
for k in actual.keys():
ev = expected[k]
av = actual[k]
assert list(ev) == list(av)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_get_foo_lost_keys(c, s, u, v, w):
x = c.submit(inc, 1, workers=[u.address])
y = await c.scatter(3, workers=[v.address])
await wait([x, y])
ua, va, wa = u.address, v.address, w.address
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
await u.close()
await v.close()
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [], va: []})
# The scattered key cannot be recomputed so it is forgotten
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: []})
# ... but when passed explicitly, it is included in the result
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [], y.key: []})
@pytest.mark.slow
@gen_cluster(
client=True, Worker=Nanny, clean_kwargs={"threads": False, "processes": False}
)
async def test_bad_tasks_fail(c, s, a, b):
f = c.submit(sys.exit, 0)
with pytest.raises(KilledWorker) as info:
await f
assert info.value.last_worker.nanny in {a.address, b.address}
await asyncio.gather(a.close(), b.close())
def test_get_processing_sync(c, s, a, b):
processing = c.processing()
assert not any(v for v in processing.values())
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
)
sleep(0.2)
aa = a["address"]
bb = b["address"]
processing = c.processing()
assert set(c.processing(aa)) == {aa}
assert set(c.processing([aa])) == {aa}
c.cancel(futures)
def test_close_idempotent(c):
c.close()
c.close()
c.close()
@nodebug
def test_get_returns_early(c):
start = time()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
assert time() < start + 0.5
# Futures should be released and forgotten
wait_for(lambda: not c.futures, timeout=0.1)
wait_for(lambda: not any(c.processing().values()), timeout=3)
x = c.submit(inc, 1)
x.result()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True)
async def test_Client_clears_references_after_restart(c, s, a, b):
x = c.submit(inc, 1)
assert x.key in c.refcount
await c.restart()
assert x.key not in c.refcount
key = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert key not in c.refcount
def test_get_stops_work_after_error(c):
with pytest.raises(RuntimeError):
c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])
start = time()
while any(c.processing().values()):
sleep(0.01)
assert time() < start + 0.5
def test_as_completed_list(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq))
assert set(c.gather(seq2)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq, with_results=True))
assert set(pluck(1, seq2)) == {1, 2, 3, 4, 5}
assert set(pluck(0, seq2)) == set(seq)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
n = 50
futures = c.map(slowinc, range(n), delay=0.01)
out = []
for batch in as_completed(futures, with_results=with_results).batches():
assert isinstance(batch, (tuple, list))
sleep(0.05)
out.extend(batch)
assert len(out) == n
if with_results:
assert set(pluck(1, out)) == set(range(1, n + 1))
else:
assert set(out) == set(futures)
def test_as_completed_next_batch(c):
futures = c.map(slowinc, range(2), delay=0.1)
ac = as_completed(futures)
assert not ac.is_empty()
assert ac.next_batch(block=False) == []
assert set(ac.next_batch(block=True)).issubset(futures)
while not ac.is_empty():
assert set(ac.next_batch(block=True)).issubset(futures)
assert ac.is_empty()
assert not ac.has_ready()
@gen_test()
async def test_status():
s = await Scheduler(port=0)
c = await Client(s.address, asynchronous=True)
assert c.status == "running"
x = c.submit(inc, 1)
await c.close()
assert c.status == "closed"
await s.close()
@gen_cluster(client=True)
async def test_persist_optimize_graph(c, s, a, b):
i = 10
for method in [c.persist, c.compute]:
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=False)
await wait(b4)
assert set(map(tokey, b3.__dask_keys__())).issubset(s.tasks)
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=True)
await wait(b4)
assert not any(tokey(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
async def test_scatter_raises_if_no_workers(c, s):
with pytest.raises(TimeoutError):
await c.scatter(1, timeout=0.5)
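# Kill the scheduler out from under a connected client: the client should
# move to "connecting", cancel outstanding futures, and resume working once
# a scheduler reappears at the same address.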
@pytest.mark.slow
def test_reconnect(loop):
w = Worker("127.0.0.1", 9393, loop=loop)
loop.add_callback(w.start)
scheduler_cli = [
"dask-scheduler",
"--host",
"127.0.0.1",
"--port",
"9393",
"--no-dashboard",
]
with popen(scheduler_cli) as s:
c = Client("127.0.0.1:9393", loop=loop)
start = time()
while len(c.nthreads()) != 1:
sleep(0.1)
assert time() < start + 3
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while c.status != "connecting":
assert time() < start + 5
sleep(0.01)
assert x.status == "cancelled"
with pytest.raises(CancelledError):
x.result()
with popen(scheduler_cli) as s:
start = time()
while c.status != "running":
sleep(0.1)
assert time() < start + 5
start = time()
while len(c.nthreads()) != 1:
sleep(0.05)
assert time() < start + 15
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while True:
try:
x.result()
assert False
except CommClosedError:
continue
except CancelledError:
break
assert time() < start + 5
sleep(0.1)
sync(loop, w.close)
c.close()
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
async def test_reconnect_timeout(c, s):
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
start = time()
while c.status != "closed":
await c._update_scheduler_info()
await asyncio.sleep(0.05)
assert time() < start + 5, "Timeout waiting for reconnect to fail"
text = logger.getvalue()
assert "Failed to reconnect" in text
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.skipif(sys.version_info < (3, 7), reason="TODO: intermittent failures")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
psutil = pytest.importorskip("psutil")
proc = psutil.Process()
with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
gc.collect()
before = proc.num_fds()
done = Semaphore(0)
running = weakref.WeakKeyDictionary()
workers = set()
status = True
async def start_worker(sleep, duration, repeat=1):
for i in range(repeat):
await asyncio.sleep(sleep)
if not status:
return
w = worker(s["address"], loop=loop)
running[w] = None
await w
workers.add(w)
addr = w.worker_address
running[w] = addr
await asyncio.sleep(duration)
await w.close()
del w
await asyncio.sleep(0)
done.release()
for i in range(count):
loop.add_callback(
start_worker, random.random() / 5, random.random() / 5, repeat=repeat
)
with Client(s["address"], loop=loop) as c:
sleep(1)
for i in range(count):
done.acquire(timeout=5)
gc.collect()
if not running:
break
start = time()
while c.nthreads():
sleep(0.2)
assert time() < start + 10
while len(workers) < count * repeat:
sleep(0.2)
status = False
[c.sync(w.close) for w in list(workers)]
for w in workers:
assert w.status == "closed"
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
sleep(0.1)
assert time() < start + 10
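# Submitting identical work from a second client must not trigger any new
# scheduler transitions, whether the first run succeeded or errored.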
@gen_cluster(client=False, timeout=None)
async def test_idempotence(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
# Submit
x = c.submit(inc, 1)
await x
log = list(s.transition_log)
len_single_submit = len(log) # see last assert
y = f.submit(inc, 1)
assert x.key == y.key
await y
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
# Error
a = c.submit(div, 1, 0)
await wait(a)
assert a.status == "error"
log = list(s.transition_log)
b = f.submit(div, 1, 0)
assert a.key == b.key
await wait(b)
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
s.transition_log.clear()
# Simultaneous Submit
d = c.submit(inc, 2)
e = c.submit(inc, 2)
await wait([d, e])
assert len(s.transition_log) == len_single_submit
await c.close()
await f.close()
def test_scheduler_info(c):
info = c.scheduler_info()
assert isinstance(info, dict)
assert len(info["workers"]) == 2
def test_write_scheduler_file(c):
info = c.scheduler_info()
with tmpfile("json") as scheduler_file:
c.write_scheduler_file(scheduler_file)
with Client(scheduler_file=scheduler_file) as c2:
info2 = c2.scheduler_info()
assert c.scheduler.address == c2.scheduler.address
# test that a ValueError is raised if the scheduler_file
# attribute is already set
with pytest.raises(ValueError):
c.write_scheduler_file(scheduler_file)
def test_get_versions(c):
requests = pytest.importorskip("requests")
v = c.get_versions()
assert v["scheduler"] is not None
assert v["client"] is not None
assert len(v["workers"]) == 2
for k, v in v["workers"].items():
assert v is not None
c.get_versions(check=True)
# smoke test: asking for the versions of specific packages should not raise
v = c.get_versions(packages=["requests"])
assert v["client"]["packages"]["requests"] == requests.__version__
@gen_cluster(client=True)
async def test_async_get_versions(c, s, a, b):
await c.get_versions(check=True)
def test_threaded_get_within_distributed(c):
import dask.multiprocessing
for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:
def f():
return get({"x": (lambda: 1,)}, "x")
future = c.submit(f)
assert future.result() == 1
@gen_cluster(client=True)
async def test_lose_scattered_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "cancelled"
assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_partially_lose_scattered_data(e, s, a, b, c):
x = await e.scatter(1, workers=a.address)
await e.replicate(x, n=2)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "finished"
assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
async def test_scatter_compute_lose(c, s, a, b):
[x] = await c.scatter([[1, 2, 3, 4]], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
z = c.submit(slowadd, x, y, delay=0.2)
await asyncio.sleep(0.1)
await a.close()
with pytest.raises(CancelledError):
await wait(z)
assert x.status == "cancelled"
assert y.status == "finished"
assert z.status == "cancelled"
@gen_cluster(client=True)
async def test_scatter_compute_store_lose(c, s, a, b):
"""
Create irreplaceable data on one machine and let a dependent computation
complete on another. Then kill the machine holding the irreplaceable data.
What happens to the completed result? And what happens after it is
garbage-collected and has to be recomputed?
"""
x = await c.scatter(1, workers=a.address)
xx = c.submit(inc, x, workers=a.address)
y = c.submit(inc, 1)
z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
await wait(z)
await a.close()
start = time()
while x.status == "finished":
await asyncio.sleep(0.01)
assert time() < start + 2
# assert xx.status == 'finished'
assert y.status == "finished"
assert z.status == "finished"
zz = c.submit(inc, z)
await wait(zz)
zkey = z.key
del z
start = time()
while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
await asyncio.sleep(0.01)
assert time() < start + 2
xxkey = xx.key
del xx
start = time()
while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 2
@gen_cluster(client=True)
async def test_scatter_compute_store_lose_processing(c, s, a, b):
"""
Create irreplaceable data on one machine and let a dependent computation
complete on another. Then kill the machine holding the irreplaceable data.
What happens to the completed result? And what happens after it is
garbage-collected and has to be recomputed?
"""
[x] = await c.scatter([1], workers=a.address)
y = c.submit(slowinc, x, delay=0.2)
z = c.submit(inc, y)
await asyncio.sleep(0.1)
await a.close()
start = time()
while x.status == "finished":
await asyncio.sleep(0.01)
assert time() < start + 2
assert y.status == "cancelled"
assert z.status == "cancelled"
@gen_cluster(client=False)
async def test_serialize_future(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(lambda: 1)
result = await future
for ci in (c1, c2):
for ctxman in ci.as_current, lambda: temp_default_client(ci):
with ctxman():
future2 = pickle.loads(pickle.dumps(future))
assert future2.client is ci
assert tokey(future2.key) in ci.futures
result2 = await future2
assert result == result2
await c1.close()
await c2.close()
@gen_cluster(client=False)
async def test_temp_default_client(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c1):
assert default_client() is c1
assert default_client(c2) is c2
with temp_default_client(c2):
assert default_client() is c2
assert default_client(c1) is c1
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_as_current(c, s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c):
assert Client.current() is c
with pytest.raises(ValueError):
Client.current(allow_global=False)
with c1.as_current():
assert Client.current() is c1
assert Client.current(allow_global=True) is c1
with c2.as_current():
assert Client.current() is c2
assert Client.current(allow_global=True) is c2
await c1.close()
await c2.close()
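# The four locks below form a handshake that keeps both threads inside
# their `as_current()` blocks at the same time before either one checks
# Client.current().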
def test_as_current_is_thread_local(s):
l1 = threading.Lock()
l2 = threading.Lock()
l3 = threading.Lock()
l4 = threading.Lock()
l1.acquire()
l2.acquire()
l3.acquire()
l4.acquire()
def run1():
with Client(s.address) as c:
with c.as_current():
l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.acquire()
l4.release()
def run2():
with Client(s.address) as c:
with c.as_current():
l1.release()
l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
l4.acquire()
t1 = threading.Thread(target=run1)
t2 = threading.Thread(target=run2)
t1.start()
t2.start()
t1.join()
t2.join()
@pytest.mark.xfail(
sys.version_info < (3, 7),
reason="Python 3.6 contextvars are not copied on Task creation",
)
@gen_cluster(client=False)
async def test_as_current_is_task_local(s, a, b):
l1 = asyncio.Lock()
l2 = asyncio.Lock()
l3 = asyncio.Lock()
l4 = asyncio.Lock()
await l1.acquire()
await l2.acquire()
await l3.acquire()
await l4.acquire()
async def run1():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
await l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
await l3.acquire()
l4.release()
async def run2():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
l1.release()
await l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
await l4.acquire()
await asyncio.gather(run1(), run2())
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
total2 = delayed(sum)(L2)
out = e.persist(
L1 + L2 + [total, total2],
workers={
tuple(L1): a.address,
total: b.address,
tuple(L2): [c.address],
total2: b.address,
},
allow_other_workers=L2 + [total2],
)
await wait(out)
assert all(v.key in a.data for v in L1)
assert total.key in b.data
assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
out = e.compute(
L1 + L2 + [total],
workers={tuple(L1): a.address, total: b.address, tuple(L2): [c.address]},
allow_other_workers=L1 + [total],
)
await wait(out)
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
for v in L2:
assert s.worker_restrictions[v.key] == {c.address}
assert s.worker_restrictions[total.key] == {b.address}
assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(client=True)
async def test_compute_nested_containers(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
x = da.ones(10, chunks=(5,)) + 1
future = c.compute({"x": [x], "y": 123})
result = await future
assert isinstance(result, dict)
assert (result["x"][0] == np.ones(10) + 1).all()
assert result["y"] == 123
def test_get_restrictions():
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
r1, loose = Client.get_restrictions(L2, "127.0.0.1", False)
assert r1 == {d.key: ["127.0.0.1"] for d in L2}
assert not loose
r1, loose = Client.get_restrictions(L2, ["127.0.0.1"], True)
assert r1 == {d.key: ["127.0.0.1"] for d in L2}
assert set(loose) == {d.key for d in L2}
r1, loose = Client.get_restrictions(L2, {total: "127.0.0.1"}, True)
assert r1 == {total.key: ["127.0.0.1"]}
assert loose == [total.key]
r1, loose = Client.get_restrictions(L2, {(total,): "127.0.0.1"}, True)
assert r1 == {total.key: ["127.0.0.1"]}
assert loose == [total.key]
@gen_cluster(client=True)
async def test_scatter_type(c, s, a, b):
[future] = await c.scatter([1])
assert future.type == int
d = await c.scatter({"x": 1.0})
assert d["x"].type == float
@gen_cluster(client=True)
async def test_retire_workers_2(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await s.retire_workers(workers=[a.address])
assert b.data == {x.key: 1}
assert s.who_has == {x.key: {b.address}}
assert s.has_what == {b.address: {x.key}}
assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_retire_many_workers(c, s, *workers):
futures = await c.scatter(list(range(100)))
await s.retire_workers(workers=[w.address for w in workers[:7]])
results = await c.gather(futures)
assert results == list(range(100))
while len(s.workers) != 3:
await asyncio.sleep(0.01)
assert len(s.has_what) == len(s.nthreads) == 3
assert all(future.done() for future in futures)
assert all(s.tasks[future.key].state == "memory" for future in futures)
for w, keys in s.has_what.items():
assert 15 < len(keys) < 50
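# With a large dependency pinned on one worker and a small one on the other,
# the scheduler should weigh data-transfer cost against estimated task
# runtime: tasks mostly run next to the large piece of data, while the other
# worker still gets some work.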
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 3)] * 2,
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_weight_occupancy_against_data_movement(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0, z=0):
sleep(0.01)
return x
y = await c.scatter([[1, 2, 3, 4]], workers=[a.address])
z = await c.scatter([1], workers=[b.address])
futures = c.map(f, [1, 2, 3, 4], y=y, z=z)
await wait(futures)
assert sum(f.key in a.data for f in futures) >= 2
assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)],
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_distribute_tasks_by_nthreads(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0):
sleep(0.01)
return x
y = await c.scatter([1], broadcast=True)
futures = c.map(f, range(20), y=y)
await wait(futures)
assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_add_done_callback(c, s, a, b):
S = set()
def f(future):
future.add_done_callback(g)
def g(future):
S.add((future.key, future.status))
u = c.submit(inc, 1, key="u")
v = c.submit(throws, "hello", key="v")
w = c.submit(slowinc, 2, delay=0.3, key="w")
x = c.submit(inc, 3, key="x")
u.add_done_callback(f)
v.add_done_callback(f)
w.add_done_callback(f)
await wait((u, v, w, x))
x.add_done_callback(f)
t = time()
while len(S) < 4 and time() - t < 2.0:
await asyncio.sleep(0.01)
assert S == {(f.key, f.status) for f in (u, v, w, x)}
@gen_cluster(client=True)
async def test_normalize_collection(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(x)
z = delayed(inc)(y)
yy = c.persist(y)
zz = c.normalize_collection(z)
assert len(z.dask) == len(y.dask) + 1
assert isinstance(zz.dask[y.key], Future)
assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
async def test_normalize_collection_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
y = x + 1
yy = c.persist(y)
z = y.sum()
zdsk = dict(z.dask)
zz = c.normalize_collection(z)
assert z.dask == zdsk # do not mutate input
assert len(z.dask) > len(zz.dask)
assert any(isinstance(v, Future) for v in zz.dask.values())
for k, v in yy.dask.items():
assert zz.dask[k].key == v.key
result1 = await c.compute(z)
result2 = await c.compute(zz)
assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
da = pytest.importorskip("dask.array")
x = da.arange(2 ** 20, chunks=2 ** 10)
y = x.persist()
wait(y)
sol = y.sum().compute()
# Start releasing futures
del y
# Try to reuse futures. Previously this was a race condition,
# and the call to `.compute()` would error out due to missing
# futures on the scheduler at compute time.
normalized = c.normalize_collection(x)
res = normalized.sum().compute()
assert res == sol
@gen_cluster(client=True)
async def test_auto_normalize_collection(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
assert len(x.dask) == 2
with dask.config.set(optimizations=[c._optimize_insert_futures]):
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
await wait(yy)
start = time()
future = c.compute(y.sum())
await future
end = time()
assert end - start < 1
start = time()
z = c.persist(y + 1)
await wait(z)
end = time()
assert end - start < 1
def test_auto_normalize_collection_sync(c):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
wait(yy)
with dask.config.set(optimizations=[c._optimize_insert_futures]):
start = time()
y.sum().compute()
end = time()
assert end - start < 1
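# Scan the transition log for keys released from memory while being
# recommended straight back to "waiting"; that pattern would mean the
# scheduler threw away results it immediately had to recompute.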
def assert_no_data_loss(scheduler):
for key, start, finish, recommendations, _ in scheduler.transition_log:
if start == "memory" and finish == "released":
for k, v in recommendations.items():
assert not (k == key and v == "waiting")
@gen_cluster(client=True, timeout=None)
async def test_interleave_computations(c, s, a, b):
import distributed
distributed.g = s
xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]
total = delayed(sum)(zs)
future = c.compute(total)
done = ("memory", "released")
await asyncio.sleep(0.1)
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True, timeout=None)
async def test_interleave_computations_map(c, s, a, b):
xs = c.map(slowinc, range(30), delay=0.02)
ys = c.map(slowdec, xs, delay=0.02)
zs = c.map(slowadd, xs, ys, delay=0.02)
done = ("memory", "released")
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
@gen_cluster(client=True)
async def test_scatter_dict_workers(c, s, a, b):
await c.scatter({"a": 10}, workers=[a.address, b.address])
assert "a" in a.data or "a" in b.data
@pytest.mark.slow
@gen_test()
async def test_client_timeout():
c = Client("127.0.0.1:57484", asynchronous=True)
s = Scheduler(loop=c.loop, port=57484)
await asyncio.sleep(4)
try:
await s
except EnvironmentError: # port in use
await c.close()
return
start = time()
await c
try:
assert time() < start + 2
finally:
await c.close()
await s.close()
@gen_cluster(client=True)
async def test_submit_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(L=None):
return sum(L)
future = c.submit(f, L=futures)
result = await future
assert result == 1 + 2 + 3
@gen_cluster(client=True)
async def test_map_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(i, L=None):
return i + sum(L)
futures = c.map(f, range(10), L=futures)
results = await c.gather(futures)
assert results == [i + 6 for i in range(10)]
@gen_cluster(client=True)
async def test_dont_clear_waiting_data(c, s, a, b):
start = time()
x = await c.scatter(1)
y = c.submit(slowinc, x, delay=0.5)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
key = x.key
del x
for i in range(5):
assert s.waiting_data[key]
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_get_future_error_simple(c, s, a, b):
f = c.submit(div, 1, 0)
await wait(f)
assert f.status == "error"
function, args, kwargs, deps = await c._get_futures_error(f)
# args contains only solid values, not keys
assert function.__name__ == "div"
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_get_futures_error(c, s, a, b):
x0 = delayed(dec)(2, dask_key_name="x0")
y0 = delayed(dec)(1, dask_key_name="y0")
x = delayed(div)(1, x0, dask_key_name="x")
y = delayed(div)(1, y0, dask_key_name="y")
tot = delayed(sum)(x, y, dask_key_name="tot")
f = c.compute(tot)
await wait(f)
assert f.status == "error"
function, args, kwargs, deps = await c._get_futures_error(f)
assert function.__name__ == "div"
assert args == (1, y0.key)
@gen_cluster(client=True)
async def test_recreate_error_delayed(c, s, a, b):
x0 = delayed(dec)(2)
y0 = delayed(dec)(1)
x = delayed(div)(1, x0)
y = delayed(div)(1, y0)
tot = delayed(sum)(x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_futures(c, s, a, b):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_collection(c, s, a, b):
b = db.range(10, npartitions=4)
b = b.map(lambda x: 1 / x)
b = b.persist()
f = c.compute(b)
function, args, kwargs = await c._recreate_error_locally(f)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
def make_err(x):
# because pandas would happily work with NaN
if x == 0:
raise ValueError
return x
df2 = df.a.map(make_err)
f = c.compute(df2)
function, args, kwargs = await c._recreate_error_locally(f)
with pytest.raises(ValueError):
function(*args, **kwargs)
# with persist
df3 = c.persist(df2)
function, args, kwargs = await c._recreate_error_locally(df3)
with pytest.raises(ValueError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_array(c, s, a, b):
da = pytest.importorskip("dask.array")
pytest.importorskip("scipy")
z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
zz = z.persist()
func, args, kwargs = await c._recreate_error_locally(zz)
assert "0.,0.,0." in str(args).replace(" ", "") # args contain actual arrays
def test_recreate_error_sync(c):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
with pytest.raises(ZeroDivisionError):
c.recreate_error_locally(f)
assert f.status == "error"
def test_recreate_error_not_error(c):
f = c.submit(dec, 2)
with pytest.raises(ValueError, match="No errored futures passed"):
c.recreate_error_locally(f)
@gen_cluster(client=True)
async def test_retire_workers(c, s, a, b):
assert set(s.workers) == {a.address, b.address}
await c.retire_workers(workers=[a.address], close_workers=True)
assert set(s.workers) == {b.address}
start = time()
while a.status != "closed":
await asyncio.sleep(0.01)
assert time() < start + 5
class MyException(Exception):
pass
@gen_cluster(client=True)
async def test_robust_unserializable(c, s, a, b):
class Foo:
def __getstate__(self):
raise MyException()
with pytest.raises(MyException):
future = c.submit(identity, Foo())
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
future = c.submit(identity, Foo())
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable_function(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
def __call__(self, *args):
return 1
future = c.submit(Foo(), 1)
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_fire_and_forget(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.1)
import distributed
def f(x):
distributed.foo = 123
try:
fire_and_forget(c.submit(f, future))
start = time()
while not hasattr(distributed, "foo"):
await asyncio.sleep(0.01)
assert time() < start + 2
assert distributed.foo == 123
finally:
del distributed.foo
start = time()
while len(s.tasks) > 1:
await asyncio.sleep(0.01)
assert time() < start + 2
assert set(s.who_wants) == {future.key}
assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
async def test_fire_and_forget_err(c, s, a, b):
fire_and_forget(c.submit(div, 1, 0))
await asyncio.sleep(0.1)
# erred task should clear out quickly
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 1
def test_quiet_client_close(loop):
with captured_logger(logging.getLogger("distributed")) as logger:
with Client(loop=loop, processes=False, threads_per_worker=4) as c:
futures = c.map(slowinc, range(1000), delay=0.01)
sleep(0.200) # stop part-way
sleep(0.1) # let things settle
out = logger.getvalue()
lines = out.strip().split("\n")
assert len(lines) <= 2
for line in lines:
assert (
not line
or "Reconnecting" in line
or "garbage" in line
or set(line) == {"-"}
), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
with captured_logger(logging.getLogger("tornado.application")) as logger:
cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
client = Client(cluster, loop=loop)
cluster.close()
client.close()
out = logger.getvalue()
assert "CancelledError" not in out
@gen_cluster()
async def test_close(s, a, b):
c = await Client(s.address, asynchronous=True)
future = c.submit(inc, 1)
await wait(future)
assert c.id in s.wants_what
await c.close()
start = time()
while c.id in s.wants_what or s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
def test_threadsafe(c):
def f(_):
d = deque(maxlen=50)
for i in range(100):
future = c.submit(inc, random.randint(0, 100))
d.append(future)
sleep(0.001)
c.gather(list(d))
total = c.submit(sum, list(d))
return total.result()
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(20) as e:
results = list(e.map(f, range(20)))
assert results and all(results)
del results
@pytest.mark.slow
def test_threadsafe_get(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
total += (x + random.randint(0, 20)).sum().compute()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(30) as e:
results = list(e.map(f, range(30)))
assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
future = c.compute((x + random.randint(0, 20)).sum())
total += future.result()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
e = ThreadPoolExecutor(30)
results = list(e.map(f, range(30)))
assert results and all(results)
@gen_cluster(client=True)
async def test_identity(c, s, a, b):
assert c.id.lower().startswith("client")
assert a.id.lower().startswith("worker")
assert b.id.lower().startswith("worker")
assert s.id.lower().startswith("scheduler")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
async def test_get_client(c, s, a, b):
assert get_client() is c
assert c.asynchronous
def f(x):
client = get_client()
future = client.submit(inc, x)
import distributed
assert not client.asynchronous
assert client is distributed.tmp_client
return future.result()
import distributed
distributed.tmp_client = c
try:
futures = c.map(f, range(5))
results = await c.gather(futures)
assert results == list(map(inc, range(5)))
finally:
del distributed.tmp_client
def test_get_client_no_cluster():
# Clean up any global workers added by other tests. This test requires that
# there are no global workers.
Worker._instances.clear()
msg = "No global client found and no address provided"
with pytest.raises(ValueError, match=r"^{}$".format(msg)):
get_client()
@gen_cluster(client=True)
async def test_serialize_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.arange(10, chunks=(5,)).persist()
def f(x):
assert isinstance(x, da.Array)
return x.sum().compute()
future = c.submit(f, x)
result = await future
assert result == sum(range(10))
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1, timeout=100)
async def test_secede_simple(c, s, a):
def f():
client = get_client()
secede()
return client.submit(inc, 1).result()
result = await c.submit(f)
assert result == 2
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2, timeout=60)
async def test_secede_balances(c, s, a, b):
count = threading.active_count()
def f(x):
client = get_client()
sleep(0.01) # do some work
secede()
futures = client.map(slowinc, range(10), pure=False, delay=0.01)
total = client.submit(sum, futures).result()
return total
futures = c.map(f, range(100))
start = time()
while not all(f.status == "finished" for f in futures):
await asyncio.sleep(0.01)
assert threading.active_count() < count + 50
assert len(a.log) < 2 * len(b.log)
assert len(b.log) < 2 * len(a.log)
results = await c.gather(futures)
assert results == [sum(map(inc, range(10)))] * 100
@gen_cluster(client=True)
async def test_sub_submit_priority(c, s, a, b):
def f():
client = get_client()
client.submit(slowinc, 1, delay=0.2, key="slowinc")
future = c.submit(f, key="f")
await asyncio.sleep(0.1)
if len(s.tasks) == 2:
assert (
s.priorities["f"] > s.priorities["slowinc"]
) # lower values schedule first
def test_get_client_sync(c, s, a, b):
results = c.run(lambda: get_worker().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
results = c.run(lambda: get_client().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
async def test_serialize_collections_of_futures(c, s, a, b):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = await c.scatter(ddf)
ddf2 = await future
df2 = await c.compute(ddf2)
assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = c.scatter(ddf)
result = future.result()
assert_eq(result.compute(), df)
assert future.type == dd.DataFrame
assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
def _dynamic_workload(x, delay=0.01):
if delay == "random":
sleep(random.random() / 2)
else:
sleep(delay)
if x > 4:
return 4
secede()
client = get_client()
futures = client.map(
_dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
)
total = client.submit(sum, futures)
return total.result()
def _test_dynamic_workloads_sync(c, delay):
future = c.submit(_dynamic_workload, 0, delay=delay)
assert future.result(timeout=40) == 52
def test_dynamic_workloads_sync(c):
_test_dynamic_workloads_sync(c, delay=0.02)
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
_test_dynamic_workloads_sync(c, delay="random")
@gen_cluster(client=True)
async def test_bytes_keys(c, s, a, b):
key = b"inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is bytes
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_ascii_keys(c, s, a, b):
uni_type = type("")
key = "inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_keys(c, s, a, b):
uni_type = type("")
key = "inc-123\u03bc"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
future2 = c.submit(inc, future)
result2 = await future2
assert result2 == 3
future3 = await c.scatter({"data-123": 123})
result3 = await future3["data-123"]
assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
async def f():
x = await c.scatter(123)
y = c.submit(inc, x)
z = await c.gather(y)
return z
z = sync(loop, f)
assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
loop = loop_in_thread
with LocalCluster(
loop=loop, scheduler_port=0, dashboard_address=None, silence_logs=False
) as cluster:
with captured_logger("distributed.comm") as sio:
with Client(cluster, loop=loop) as client:
futures = client.map(lambda x: x + 1, range(10))
sleep(0.05)
cluster.close()
sleep(0.05)
text = sio.getvalue()
assert not text
def test_warn_executor(loop, s, a, b):
with warnings.catch_warnings(record=True) as record:
with Executor(s["address"], loop=loop) as c:
pass
assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_future(c, s, a, b):
x = c.submit(slowdec, 1, delay=0.5)
future = c.submit(slowinc, 1, delay=0.5)
await asyncio.sleep(0.1)
results = await asyncio.gather(
c.call_stack(future), c.call_stack(keys=[future.key])
)
assert all(list(first(result.values())) == [future.key] for result in results)
assert results[0] == results[1]
result = results[0]
w = a if future.key in a.executing else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_all(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.8)
while not a.executing and not b.executing:
await asyncio.sleep(0.01)
result = await c.call_stack()
w = a if a.executing else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing and not b.executing:
await asyncio.sleep(0.001)
result = await c.call_stack(x)
assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections_all(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing and not b.executing:
await asyncio.sleep(0.001)
result = await c.call_stack()
assert result
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await wait(futures)
x = await c.profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = await c.profile(start=0, stop=time())
assert (
x["count"]
== sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
)
y = await c.profile(start=time() - 0.300, stop=time())
assert 0 < y["count"] < x["count"]
assert not any(p["count"] for _, p in b.profile_history)
result = await c.profile(workers=b.address)
assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile_keys(c, s, a, b):
x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
await wait(x + y)
xp = await c.profile("slowinc")
yp = await c.profile("slowdec")
p = await c.profile()
assert p["count"] == xp["count"] + yp["count"]
with captured_logger(logging.getLogger("distributed")) as logger:
prof = await c.profile("does-not-exist")
assert prof == profile.create()
out = logger.getvalue()
assert not out
@gen_cluster()
async def test_client_with_name(s, a, b):
with captured_logger("distributed.scheduler") as sio:
client = await Client(s.address, asynchronous=True, name="foo")
assert "foo" in client.id
await client.close()
text = sio.getvalue()
assert "foo" in text
@gen_cluster(client=True)
async def test_future_defaults_to_default_client(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
future = Future(x.key)
assert future.client is c
@gen_cluster(client=True)
async def test_future_auto_inform(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
client = await Client(s.address, asynchronous=True)
future = Future(x.key, client)
start = time()
while future.status != "finished":
await asyncio.sleep(0.01)
assert time() < start + 1
await client.close()
def test_client_async_before_loop_starts():
with pristine_loop() as loop:
client = Client(asynchronous=True, loop=loop)
assert client.asynchronous
client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
async def test_nested_compute(c, s, a, b):
def fib(x):
assert get_worker().get_current_task()
if x < 2:
return x
a = delayed(fib)(x - 1)
b = delayed(fib)(x - 2)
c = a + b
return c.compute()
future = c.submit(fib, 8)
result = await future
assert result == 21
assert len(s.transition_log) > 50
@gen_cluster(client=True)
async def test_task_metadata(c, s, a, b):
await c.set_metadata("x", 1)
result = await c.get_metadata("x")
assert result == 1
future = c.submit(inc, 1)
key = future.key
await wait(future)
await c.set_metadata(key, 123)
result = await c.get_metadata(key)
assert result == 123
del future
while key in s.tasks:
await asyncio.sleep(0.01)
with pytest.raises(KeyError):
await c.get_metadata(key)
result = await c.get_metadata(key, None)
assert result is None
await c.set_metadata(["x", "a"], 1)
result = await c.get_metadata("x")
assert result == {"a": 1}
await c.set_metadata(["x", "b"], 2)
result = await c.get_metadata("x")
assert result == {"a": 1, "b": 2}
result = await c.get_metadata(["x", "a"])
assert result == 1
await c.set_metadata(["x", "a", "c", "d"], 1)
result = await c.get_metadata("x")
assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
async def test_logs(c, s, a, b):
await wait(c.map(inc, range(5)))
logs = await c.get_scheduler_logs(n=5)
assert logs
for _, msg in logs:
assert "distributed.scheduler" in msg
w_logs = await c.get_worker_logs(n=5)
assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
for log in w_logs.values():
for _, msg in log:
assert "distributed.worker" in msg
n_logs = await c.get_worker_logs(nanny=True)
assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
n_logs = await c.get_worker_logs(nanny=True, workers=[a.worker_address])
assert set(n_logs.keys()) == {a.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
@gen_cluster(client=True)
async def test_avoid_delayed_finalize(c, s, a, b):
x = delayed(inc)(1)
future = c.compute(x)
result = await future
assert result == 2
assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
async def test_config_scheduler_address(s, a, b):
with dask.config.set({"scheduler-address": s.address}):
with captured_logger("distributed.client") as sio:
c = await Client(asynchronous=True)
assert c.scheduler.address == s.address
text = sio.getvalue()
assert s.address in text
await c.close()
@gen_cluster(client=True)
async def test_warn_when_submitting_large_values(c, s, a, b):
with warnings.catch_warnings(record=True) as record:
future = c.submit(lambda x: x + 1, b"0" * 2000000)
text = str(record[0].message)
assert "2.00 MB" in text
assert "large" in text
assert "..." in text
assert "'000" in text
assert "000'" in text
assert len(text) < 2000
with warnings.catch_warnings(record=True) as record:
data = b"0" * 2000000
for i in range(10):
future = c.submit(lambda x, y: x, data, i)
assert len(record) < 2
@gen_cluster()
async def test_scatter_direct(s, a, b):
c = await Client(s.address, asynchronous=True, heartbeat_interval=10)
last = s.clients[c.id].last_seen
start = time()
while s.clients[c.id].last_seen == last:
await asyncio.sleep(0.10)
assert time() < start + 5
await c.close()
@gen_cluster(client=True)
async def test_unhashable_function(c, s, a, b):
d = {"a": 1}
result = await c.submit(d.get, "a")
assert result == 1
@gen_cluster()
async def test_client_name(s, a, b):
with dask.config.set({"client-name": "hello-world"}):
c = await Client(s.address, asynchronous=True)
assert any("hello-world" in name for name in list(s.clients))
await c.close()
def test_client_doesnt_close_given_loop(loop, s, a, b):
with Client(s["address"], loop=loop) as c:
assert c.submit(inc, 1).result() == 2
with Client(s["address"], loop=loop) as c:
assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
async def test_quiet_scheduler_loss(c, s):
c._periodic_callbacks["scheduler-info"].interval = 10
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
await c._update_scheduler_info()
text = logger.getvalue()
assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
monkeypatch.setenv("USER", "myusername")
with cluster(scheduler_kwargs={"dashboard_address": ":12355"}) as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
with dask.config.set(
{"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
):
link = "http://foo-myusername:12355/status"
assert link == c.dashboard_link
text = c._repr_html_()
assert link in text
@pytest.mark.asyncio
async def test_dashboard_link_inproc(cleanup):
async with Client(processes=False, asynchronous=True) as c:
with dask.config.set({"distributed.dashboard.link": "{host}"}):
assert "/" not in c.dashboard_link
@gen_test()
async def test_client_timeout_2():
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
start = time()
c = Client("127.0.0.1:3755", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
stop = time()
assert c.status == "closed"
await c.close()
assert stop - start < 1
@gen_test()
async def test_client_active_bad_port():
import tornado.web
import tornado.httpserver
application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
c = Client("127.0.0.1:8080", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
await c._close(fast=True)
http_server.stop()
@pytest.mark.parametrize("direct", [True, False])
def test_turn_off_pickle(direct):
@gen_cluster()
async def test(s, a, b):
import numpy as np
async with Client(
s.address, asynchronous=True, serializers=["dask", "msgpack"]
) as c:
assert (await c.submit(inc, 1)) == 2
await c.submit(np.ones, 5)
await c.scatter(1)
# Can't send complex data
with pytest.raises(TypeError):
future = await c.scatter(inc)
# can send complex tasks (this uses pickle regardless)
future = c.submit(lambda x: x, inc)
await wait(future)
# but can't receive complex results
with pytest.raises(TypeError):
await c.gather(future, direct=direct)
# Run works
result = await c.run(lambda: 1)
assert list(result.values()) == [1, 1]
result = await c.run_on_scheduler(lambda: 1)
assert result == 1
# But not with complex return values
with pytest.raises(TypeError):
await c.run(lambda: inc)
with pytest.raises(TypeError):
await c.run_on_scheduler(lambda: inc)
test()
@gen_cluster()
async def test_de_serialization(s, a, b):
import numpy as np
c = await Client(
s.address,
asynchronous=True,
serializers=["msgpack", "pickle"],
deserializers=["msgpack"],
)
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_de_serialization_none(s, a, b):
import numpy as np
c = await Client(s.address, asynchronous=True, deserializers=["msgpack"])
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_client_repr_closed(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c._repr_html_()
def test_client_repr_closed_sync(loop):
with Client(loop=loop, processes=False, dashboard_address=None) as c:
c.close()
c._repr_html_()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_nested_prioritization(c, s, w):
x = delayed(inc)(1, dask_key_name=("a", 2))
y = delayed(inc)(2, dask_key_name=("a", 10))
o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))
fx, fy = c.compute([x, y])
await wait([fx, fy])
assert (o[x.key] < o[y.key]) == (
s.tasks[tokey(fx.key)].priority < s.tasks[tokey(fy.key)].priority
)
@gen_cluster(client=True)
async def test_scatter_error_cancel(c, s, a, b):
# https://github.com/dask/distributed/issues/2038
def bad_fn(x):
raise Exception("lol")
x = await c.scatter(1)
y = c.submit(bad_fn, x)
del x
await wait(y)
assert y.status == "error"
await asyncio.sleep(0.1)
assert y.status == "error" # not cancelled
def test_no_threads_lingering():
active = dict(threading._active)
assert threading.active_count() < 40, list(active.values())
@gen_cluster()
async def test_direct_async(s, a, b):
c = await Client(s.address, asynchronous=True, direct_to_workers=True)
assert c.direct_to_workers
await c.close()
c = await Client(s.address, asynchronous=True, direct_to_workers=False)
assert not c.direct_to_workers
await c.close()
def test_direct_sync(c):
assert not c.direct_to_workers
def f():
return get_client().direct_to_workers
assert c.submit(f).result()
@gen_cluster()
async def test_mixing_clients(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(inc, 1)
with pytest.raises(ValueError):
c2.submit(inc, future)
assert not c2.futures # Don't create Futures on second Client
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_tuple_keys(c, s, a, b):
x = dask.delayed(inc)(1, dask_key_name=("x", 1))
y = dask.delayed(inc)(x, dask_key_name=("y", 1))
future = c.compute(y)
assert (await future) == 3
@gen_cluster(client=True)
async def test_multiple_scatter(c, s, a, b):
futures = await asyncio.gather(*[c.scatter(1, direct=True) for _ in range(5)])
x = await futures[0]
x = await futures[0]
@gen_cluster(client=True)
async def test_map_large_kwargs_in_graph(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(100000)
futures = c.map(lambda a, b: a + b, range(100), b=x)
while not s.tasks:
await asyncio.sleep(0.01)
assert len(s.tasks) == 101
assert any(k.startswith("ndarray") for k in s.tasks)
@gen_cluster(client=True)
async def test_retry(c, s, a, b):
def f():
assert dask.config.get("foo")
with dask.config.set(foo=False):
future = c.submit(f)
with pytest.raises(AssertionError):
await future
with dask.config.set(foo=True):
await future.retry()
await future
@gen_cluster(client=True)
async def test_retry_dependencies(c, s, a, b):
def f():
return dask.config.get("foo")
x = c.submit(f)
y = c.submit(inc, x)
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
await y.retry()
await x.retry()
result = await y
assert result == 101
@gen_cluster(client=True)
async def test_released_dependencies(c, s, a, b):
def f(x):
return dask.config.get("foo") + 1
x = c.submit(inc, 1, key="x")
y = c.submit(f, x, key="y")
del x
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_profile_bokeh(c, s, a, b):
pytest.importorskip("bokeh.plotting")
from bokeh.model import Model
await c.gather(c.map(slowinc, range(10), delay=0.2))
state, figure = await c.profile(plot=True)
assert isinstance(figure, Model)
with tmpfile("html") as fn:
try:
await c.profile(filename=fn)
except PermissionError:
if WINDOWS:
pytest.xfail()
assert os.path.exists(fn)
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
future = c.submit(add, 1, 2)
subgraph = SubgraphCallable(
{"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
)
dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}
future2 = c.get(dsk, "d", sync=False)
result = await future2
assert result == 11
# Nested subgraphs
subgraph2 = SubgraphCallable(
{
"_2": (subgraph, "_0", "_1"),
"_3": (subgraph, "_2", "_1"),
"_4": (add, "_3", future2),
},
"_4",
("_0", "_1"),
)
dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}
result = await c.get(dsk2, "g", sync=False)
assert result == 22
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = pd.DataFrame({"x": range(1, 11)})
ddf = dd.from_pandas(df, npartitions=2).persist()
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
result = await c.compute(ddf)
assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
with Client(s["address"], loop=loop, direct_to_workers=True) as client:
future = client.scatter(1)
future.result()
resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
assert "gather" not in str(resp)
@gen_cluster(client=True)
async def test_instances(c, s, a, b):
assert list(Client._instances) == [c]
assert list(Scheduler._instances) == [s]
assert set(Worker._instances) == {a, b}
@gen_cluster(client=True)
async def test_wait_for_workers(c, s, a, b):
future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
await asyncio.sleep(0.22) # 2 chances
assert not future.done()
w = await Worker(s.address)
start = time()
await future
assert time() < start + 1
await w.close()
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
pytest.importorskip("pandas")
df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})
proc = psutil.Process()
start = proc.num_fds()
async with Scheduler(port=0, dashboard_address=":0") as s:
async with Worker(s.address, nthreads=2) as a, Worker(
s.address, nthreads=2
) as b:
async with Client(s.address, asynchronous=True) as c:
await df.sum().persist()
begin = time()
while proc.num_fds() > begin:
await asyncio.sleep(0.01)
assert time() < begin + 5, (start, proc.num_fds())
@pytest.mark.asyncio
async def test_dashboard_link_cluster(cleanup):
class MyCluster(LocalCluster):
@property
def dashboard_link(self):
return "http://foo.com"
async with MyCluster(processes=False, asynchronous=True) as cluster:
async with Client(cluster, asynchronous=True) as client:
assert "http://foo.com" in client._repr_html_()
@pytest.mark.asyncio
async def test_shutdown(cleanup):
async with Scheduler(port=0) as s:
async with Worker(s.address) as w:
async with Client(s.address, asynchronous=True) as c:
await c.shutdown()
assert s.status == "closed"
assert w.status == "closed"
@pytest.mark.asyncio
async def test_shutdown_localcluster(cleanup):
async with LocalCluster(n_workers=1, asynchronous=True, processes=False) as lc:
async with Client(lc, asynchronous=True) as c:
await c.shutdown()
assert lc.scheduler.status == "closed"
@pytest.mark.asyncio
async def test_config_inherited_by_subprocess(cleanup):
def f(x):
return dask.config.get("foo") + 1
with dask.config.set(foo=100):
async with LocalCluster(n_workers=1, asynchronous=True, processes=True) as lc:
async with Client(lc, asynchronous=True) as c:
result = await c.submit(f, 1)
assert result == 101
@gen_cluster(client=True)
async def test_futures_of_sorted(c, s, a, b):
pytest.importorskip("dask.dataframe")
df = await dask.datasets.timeseries(dtypes={"x": int}).persist()
futures = futures_of(df)
for k, f in zip(df.__dask_keys__(), futures):
assert str(k) in str(f)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "10ms"})
async def test_profile_server(c, s, a, b):
for i in range(5):
try:
x = c.map(slowinc, range(10), delay=0.01, workers=a.address, pure=False)
await wait(x)
await asyncio.gather(
c.run(slowinc, 1, delay=0.5), c.run_on_scheduler(slowdec, 1, delay=0.5)
)
p = await c.profile(server=True) # All worker servers
assert "slowinc" in str(p)
p = await c.profile(scheduler=True) # Scheduler
assert "slowdec" in str(p)
except AssertionError:
if i == 4:
raise
else:
pass
else:
break
@gen_cluster(client=True)
async def test_await_future(c, s, a, b):
future = c.submit(inc, 1)
async def f(): # flake8: noqa
result = await future
assert result == 2
await f()
future = c.submit(div, 1, 0)
async def f():
with pytest.raises(ZeroDivisionError):
await future
await f()
@gen_cluster(client=True)
async def test_as_completed_async_for(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures)
results = []
async def f():
async for future in ac:
result = await future
results.append(result)
await f()
assert set(results) == set(range(1, 11))
@gen_cluster(client=True)
async def test_as_completed_async_for_results(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures, with_results=True)
results = []
async def f():
async for future, result in ac:
results.append(result)
await f()
assert set(results) == set(range(1, 11))
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_as_completed_async_for_cancel(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(sleep, 0.3)
ac = as_completed([x, y])
async def _():
await asyncio.sleep(0.1)
await y.cancel(asynchronous=True)
c.loop.add_callback(_)
L = []
async def f():
async for future in ac:
L.append(future)
await f()
assert L == [x, y]
def test_async_with(loop):
result = None
client = None
cluster = None
async def f():
async with Client(processes=False, asynchronous=True) as c:
nonlocal result, client, cluster
result = await c.submit(lambda x: x + 1, 10)
client = c
cluster = c.cluster
loop.run_sync(f)
assert result == 11
assert client.status == "closed"
assert cluster.status == "closed"
def test_client_sync_with_async_def(loop):
async def ff():
await asyncio.sleep(0.01)
return 1
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert sync(loop, ff) == 1
assert c.sync(ff) == 1
@pytest.mark.skip(reason="known intermittent failure")
@gen_cluster(client=True)
async def test_dont_hold_on_to_large_messages(c, s, a, b):
np = pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
x = np.random.random(1000000)
xr = weakref.ref(x)
d = da.from_array(x, chunks=(100000,))
d = d.persist()
del x
start = time()
while xr() is not None:
if time() > start + 5:
# Help diagnosing
from types import FrameType
x = xr()
if x is not None:
del x
rc = sys.getrefcount(xr())
refs = gc.get_referrers(xr())
print("refs to x:", rc, refs, gc.isenabled())
frames = [r for r in refs if isinstance(r, FrameType)]
for i, f in enumerate(frames):
print(
"frames #%d:" % i,
f.f_code.co_name,
f.f_code.co_filename,
sorted(f.f_locals),
)
pytest.fail("array should have been destroyed")
await asyncio.sleep(0.200)
@gen_cluster(client=True)
async def test_run_scheduler_async_def(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True)
async def test_run_scheduler_async_def_wait(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f, wait=False)
while not hasattr(s, "foo"):
await asyncio.sleep(0.01)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f, wait=False)
while not hasattr(a, "foo") or not hasattr(b, "foo"):
await asyncio.sleep(0.01)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_performance_report(c, s, a, b):
da = pytest.importorskip("dask.array")
async def f():
"""
We wrap this in a function so that the assertions aren't in the
        performance report itself
Also, we want this comment to appear
"""
x = da.random.random((1000, 1000), chunks=(100, 100))
with tmpfile(extension="html") as fn:
async with performance_report(filename=fn):
await c.compute((x + x.T).sum())
with open(fn) as f:
data = f.read()
return data
data = await f()
assert "Also, we want this comment to appear" in data
assert "bokeh" in data
assert "random" in data
assert "Dask Performance Report" in data
assert "x = da.random" in data
assert "Threads: 4" in data
@pytest.mark.asyncio
async def test_client_gather_semaphor_loop(cleanup):
async with Scheduler(port=0) as s:
async with Client(s.address, asynchronous=True) as c:
assert c._gather_semaphore._loop is c.loop.asyncio_loop
@gen_cluster(client=True)
async def test_as_completed_condition_loop(c, s, a, b):
seq = c.map(inc, range(5))
ac = as_completed(seq)
assert ac.condition._loop == c.loop.asyncio_loop
def test_client_connectionpool_semaphore_loop(s, a, b):
with Client(s["address"]) as c:
assert c.rpc.semaphore._loop is c.loop.asyncio_loop
|
netcat.py
|
import socket, threading
class Netcat2:
    """A small netcat-style TCP helper.

    Use connect() for client mode or listen() for single-client server mode.
    In server mode, send()/receive() only work once a peer has connected.
    """
    def __init__(self):
        self.RHOST = ''
        self.RPORT = 0
        self.LHOST = ''
        self.LPORT = 0
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.default_recv_len = 1024
        self.server_mode = False
        # set by the background accept thread once a peer connects
        self.connected_client = None
        self.connected_client_addr = None
def connect(self,rhost,rport):
self.RHOST = rhost
self.RPORT = rport
self.sock.connect((self.RHOST,self.RPORT))
    def listen(self,lhost,lport):
        self.LHOST = lhost
        self.LPORT = lport
        self.server_mode = True
        self.sock.bind((self.LHOST,self.LPORT))
        self.sock.listen(1)
        # accept in the background so listen() returns immediately; the thread
        # is a daemon so an idle listener does not block interpreter shutdown
        t = threading.Thread(target=self.__client_accept, daemon=True)
        t.start()
def receive(self,limit=1024):
if self.server_mode:
return self.__client_receive(self.connected_client,limit)
else:
return self.__client_receive(self.sock,limit)
def send(self,data):
        if isinstance(data, str):
data = data.encode()
if self.server_mode:
self.__client_send(self.connected_client,data)
else:
self.__client_send(self.sock,data)
def close(self):
if self.server_mode:
self.connected_client.close()
else:
self.sock.close()
def interactive(self,receive_data=True):
while True:
cmd = input()
cmd += "\n"
self.send(cmd)
if receive_data:
rdata = self.receive_all()
print(rdata.decode(),end=' ')
def receive_all(self):
recv_len = 1
response = b''
s = None
if self.server_mode:
s = self.connected_client
else:
s = self.sock
while recv_len:
data = self.__client_receive(s,self.default_recv_len)
response += data
recv_len = len(data)
if recv_len < self.default_recv_len:
recv_len = 0
break
return response
def __client_accept(self):
c, addr = self.sock.accept()
self.connected_client = c
self.connected_client_addr = addr
def __client_receive(self,client,limit=1024):
return client.recv(limit)
def __client_send(self,client,data):
client.send(data)
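
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of driving Netcat2 as a plain TCP client. The host, port
# and payload are placeholders; the function is defined but never called so
# importing this module stays side-effect free.
def _netcat2_client_example():
    nc = Netcat2()
    nc.connect("127.0.0.1", 9000)  # assumes something is listening here
    nc.send("hello\n")             # send() encodes str payloads to bytes
    reply = nc.receive_all()       # read until a short or empty recv
    print(reply.decode())
    nc.close()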
|
stockbarcollector.py
|
# **************************************************************************** #
# #
# ::: :::::::: #
# stockbarcollector.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: zhongjy1992 <zhongjy1992@outlook.com> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2019/10/01 22:07:05 by zhongjy1992 #+# #+# #
# Updated: 2019/10/01 22:07:05 by zhongjy1992 ### ########.fr #
# #
# **************************************************************************** #
import json
import threading
import datetime
from QAPUBSUB.consumer import subscriber_routing
from QAPUBSUB.producer import publisher
from QARealtimeCollector.setting import eventmq_ip
from QUANTAXIS.QAFetch.QATdx_adv import QA_Tdx_Executor
class QARTCStockBar(QA_Tdx_Executor):
def __init__(self, delay=30.5):
super().__init__(name='QA_REALTIME_COLLECTOR_STOCK_BAR')
        # interval (seconds) between data fetch requests
self.isOK = True
self.delay = delay
self.codelist = []
self.sub = subscriber_routing(host=eventmq_ip,
exchange='QARealtime_Market', routing_key='stock')
self.sub.callback = self.callback
self.pub = publisher(
host=eventmq_ip, exchange='realtime_stock_min'
)
print("QA_REALTIME_COLLECTOR_STOCK_BAR INIT, delay %s" % self.delay)
threading.Thread(target=self.sub.start, daemon=True).start()
def subscribe(self, code):
"""继续订阅
Arguments:
code {[type]} -- [description]
"""
if code not in self.codelist:
self.codelist.append(code)
def unsubscribe(self, code):
self.codelist.remove(code)
def callback(self, a, b, c, data):
data = json.loads(data)
if data['topic'].lower() == 'subscribe':
            print('stock bar collector service received new subscribe: {}'.format(data['code']))
new_ins = data['code'].replace('_', '.').split(',')
if isinstance(new_ins, list):
for item in new_ins:
self.subscribe(item)
else:
self.subscribe(new_ins)
if data['topic'].lower() == 'unsubscribe':
            print('stock bar collector service received new unsubscribe: {}'.format(data['code']))
new_ins = data['code'].replace('_', '.').split(',')
if isinstance(new_ins, list):
for item in new_ins:
self.unsubscribe(item)
else:
self.unsubscribe(new_ins)
    def get_data(self):
        # fetch the current and the previous 1-minute bar
        lens = 2  # TODO default 2
        data = self.get_security_bar_concurrent(self.codelist, "1min", lens)
        # print(data)
        self.pub.pub(json.dumps(data))
    def run(self):
        # poll for bar data on a fixed interval
        import time
        while True:
            print(self.codelist, self.isOK)
            if len(self.codelist) > 0 and self.isOK:
                print(datetime.datetime.now(), " : stock bar collector service requested data")
                self.isOK = False
                self.get_data()
                self.isOK = True
                time.sleep(self.delay)
            else:
                time.sleep(1)
if __name__ == "__main__":
QARTCStockBar().start()
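
# --- Control message format (illustrative, inferred from callback above) ---
# The collector subscribes to the 'QARealtime_Market' exchange with routing
# key 'stock' and expects JSON payloads shaped like:
#
#   {"topic": "subscribe",   "code": "000001,600000"}   # start collecting codes
#   {"topic": "unsubscribe", "code": "000001"}          # stop collecting a code
#
# Codes may be comma-separated; underscores in a code are rewritten to dots
# before subscribing. Collected 1-minute bars are re-published as JSON on the
# 'realtime_stock_min' exchange.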
|
detection_input.py
|
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import mxnet as mx
from queue import Queue
from threading import Thread
from operator_py.cython.bbox import bbox_overlaps_cython
from operator_py.bbox_transform import nonlinear_transform as bbox_transform
class DetectionAugmentation(object):
def __init__(self):
pass
def apply(self, input_record):
pass
class ReadRoiRecord(DetectionAugmentation):
"""
input: image_url, str
gt_url, str
output: image, ndarray(h, w, rgb)
image_raw_meta, tuple(h, w)
gt, any
"""
def __init__(self, gt_select):
super().__init__()
self.gt_select = gt_select
def apply(self, input_record):
image = cv2.imread(input_record["image_url"], cv2.IMREAD_COLOR)
input_record["image"] = image[:, :, ::-1].astype("float32")
# TODO: remove this compatibility method
input_record["gt_bbox"] = np.concatenate([input_record["gt_bbox"],
input_record["gt_class"].reshape(-1, 1)],
axis=1)
# gt_dict = pkl.load(input_record["gt_url"])
# for s in self.gt_select:
# input_record[s] = gt_dict[s]
class Norm2DImage(DetectionAugmentation):
"""
input: image, ndarray(h, w, rgb)
output: image, ndarray(h, w, rgb)
"""
def __init__(self, pNorm):
super().__init__()
self.p = pNorm # type: NormParam
def apply(self, input_record):
p = self.p
image = input_record["image"].astype(np.float32)
image -= p.mean
image /= p.std
input_record["image"] = image
class Resize2DImageBbox(DetectionAugmentation):
"""
input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 5)
output: image, ndarray(h', w', rgb)
im_info, tuple(h', w', scale)
gt_bbox, ndarray(n, 5)
"""
def __init__(self, pResize):
super().__init__()
self.p = pResize # type: ResizeParam
def apply(self, input_record):
p = self.p
image = input_record["image"]
gt_bbox = input_record["gt_bbox"].astype(np.float32)
short = min(image.shape[:2])
long = max(image.shape[:2])
scale = min(p.short / short, p.long / long)
input_record["image"] = cv2.resize(image, None, None, scale, scale,
interpolation=cv2.INTER_LINEAR)
# make sure gt boxes do not overflow
gt_bbox[:, :4] = gt_bbox[:, :4] * scale
if image.shape[0] < image.shape[1]:
gt_bbox[:, [0, 2]] = np.clip(gt_bbox[:, [0, 2]], 0, p.long)
gt_bbox[:, [1, 3]] = np.clip(gt_bbox[:, [1, 3]], 0, p.short)
else:
gt_bbox[:, [0, 2]] = np.clip(gt_bbox[:, [0, 2]], 0, p.short)
gt_bbox[:, [1, 3]] = np.clip(gt_bbox[:, [1, 3]], 0, p.long)
input_record["gt_bbox"] = gt_bbox
# exactly as opencv
h, w = image.shape[:2]
input_record["im_info"] = np.array([round(h * scale), round(w * scale), scale], dtype=np.float32)
class Resize2DImage(DetectionAugmentation):
"""
input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 5)
output: image, ndarray(h', w', rgb)
im_info, tuple(h', w', scale)
gt_bbox, ndarray(n, 5)
"""
def __init__(self, pResize):
super().__init__()
self.p = pResize # type: ResizeParam
def apply(self, input_record):
p = self.p
image = input_record["image"]
short = min(image.shape[:2])
long = max(image.shape[:2])
scale = min(p.short / short, p.long / long)
input_record["image"] = cv2.resize(image, None, None, scale, scale,
interpolation=cv2.INTER_LINEAR)
# exactly as opencv
h, w = image.shape[:2]
input_record["im_info"] = np.array([round(h * scale), round(w * scale), scale], dtype=np.float32)
class Resize2DImageByRoidb(DetectionAugmentation):
"""
input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 5)
output: image, ndarray(h', w', rgb)
im_info, tuple(h', w', scale)
gt_bbox, ndarray(n, 5)
"""
def __init__(self):
super().__init__()
class ResizeParam:
long = None
short = None
self.resize_aug = Resize2DImage(ResizeParam)
def apply(self, input_record):
self.resize_aug.p.long = input_record["resize_long"]
self.resize_aug.p.short = input_record["resize_short"]
self.resize_aug.apply(input_record)
class RandResize2DImageBbox(DetectionAugmentation):
"""
input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 4)
output: image, ndarray(h', w', rgb)
im_info, tuple(h', w', scale)
gt_bbox, ndarray(n, 4)
"""
def __init__(self, pRandResize):
super().__init__()
self.p = pRandResize
class ResizeParam:
long = None
short = None
self.resize_aug = Resize2DImageBbox(ResizeParam)
def apply(self, input_record):
scale_id = np.random.randint(len(self.p.long_ranges))
self.resize_aug.p.long = self.p.long_ranges[scale_id]
self.resize_aug.p.short = self.p.short_ranges[scale_id]
self.resize_aug.apply(input_record)
class Flip2DImageBbox(DetectionAugmentation):
"""
input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 4)
output: image, ndarray(h, w, rgb)
gt_bbox, ndarray(n, 4)
"""
def __init__(self):
super().__init__()
def apply(self, input_record):
if input_record["flipped"]:
image = input_record["image"]
gt_bbox = input_record["gt_bbox"]
input_record["image"] = image[:, ::-1]
flipped_bbox = gt_bbox.copy()
h, w = image.shape[:2]
flipped_bbox[:, 0] = (w - 1) - gt_bbox[:, 2]
flipped_bbox[:, 2] = (w - 1) - gt_bbox[:, 0]
input_record["gt_bbox"] = flipped_bbox
class RandCrop2DImageBbox(DetectionAugmentation):
def __init__(self, pCrop):
super().__init__()
self.p = pCrop
assert pCrop.mode in ["center", "random"], "The {} crop mode is not supported".format(pCrop.mode)
def apply(self, input_record):
p = self.p
image = input_record["image"]
gt_bbox = input_record["gt_bbox"]
if image.shape[0] >= image.shape[1]:
crop_w, crop_h = p.short, p.long
else:
crop_w, crop_h = p.long, p.short
crop_w = min(crop_w, image.shape[1])
crop_h = min(crop_h, image.shape[0])
if p.mode == "center" and gt_bbox.shape[0] > 0:
            # randomly select a box as the cropping center
rand_index = np.random.randint(gt_bbox.shape[0])
box = gt_bbox[rand_index, :]
# decide start point
ctr_x = (box[2] + box[0]) / 2.0
ctr_y = (box[3] + box[1]) / 2.0
noise_h = np.random.randint(-10, 10)
noise_w = np.random.randint(-30, 30)
start_h = int(round(ctr_y - crop_h / 2)) + noise_h
start_w = int(round(ctr_x - crop_w / 2)) + noise_w
end_h = start_h + crop_h
end_w = start_w + crop_w
            # prevent the crop from crossing the image border
if start_h < 0:
off = -start_h
start_h += off
end_h += off
if start_w < 0:
off = -start_w
start_w += off
end_w += off
if end_h > image.shape[0]:
off = end_h - image.shape[0]
end_h -= off
start_h -= off
if end_w > image.shape[1]:
off = end_w - image.shape[1]
end_w -= off
start_w -= off
else:
# random crop from image
start_h = np.random.randint(0, image.shape[0] - crop_h + 1)
start_w = np.random.randint(0, image.shape[1] - crop_w + 1)
end_h = start_h + crop_h
end_w = start_w + crop_w
assert start_h >= 0 and start_w >= 0 and end_h <= image.shape[0] and end_w <= image.shape[1]
# crop then resize
im_cropped = image[start_h:end_h, start_w:end_w, :]
# transform ground truth
ctrs_x = (gt_bbox[:, 2] + gt_bbox[:, 0]) / 2.0
ctrs_y = (gt_bbox[:, 3] + gt_bbox[:, 1]) / 2.0
keep = np.where((ctrs_y > start_h) & (ctrs_x > start_w) & (ctrs_y < end_h) & (ctrs_x < end_w))
gt_bbox = gt_bbox[keep]
gt_bbox[:, [0, 2]] -= start_w
gt_bbox[:, [1, 3]] -= start_h
gt_bbox[:, [0, 2]] = np.clip(gt_bbox[:, [0, 2]], 0, crop_w - 1)
gt_bbox[:, [1, 3]] = np.clip(gt_bbox[:, [1, 3]], 0, crop_h - 1)
input_record["image"] = im_cropped
input_record["gt_bbox"] = gt_bbox
input_record["im_info"] = np.array([crop_h, crop_w, input_record["im_info"][2]], dtype=np.float32)
class Pad2DImageBbox(DetectionAugmentation):
"""
input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 5)
output: image, ndarray(h, w, rgb)
gt_bbox, ndarray(max_num_gt, 5)
"""
def __init__(self, pPad):
super().__init__()
self.p = pPad # type: PadParam
def apply(self, input_record):
p = self.p
image = input_record["image"]
gt_bbox = input_record["gt_bbox"]
h, w = image.shape[:2]
shape = (p.long, p.short, 3) if h >= w \
else (p.short, p.long, 3)
padded_image = np.zeros(shape, dtype=np.float32)
padded_image[:h, :w] = image
padded_gt_bbox = np.full(shape=(p.max_num_gt, 5), fill_value=-1, dtype=np.float32)
padded_gt_bbox[:len(gt_bbox)] = gt_bbox
input_record["image"] = padded_image
input_record["gt_bbox"] = padded_gt_bbox
class Pad2DImage(DetectionAugmentation):
"""
input: image, ndarray(h, w, rgb)
           gt_bbox, ndarray(n, 5)
output: image, ndarray(h, w, rgb)
gt_bbox, ndarray(max_num_gt, 5)
"""
def __init__(self, pPad):
super().__init__()
self.p = pPad # type: PadParam
def apply(self, input_record):
p = self.p
image = input_record["image"]
h, w = image.shape[:2]
shape = (p.long, p.short, 3) if h >= w \
else (p.short, p.long, 3)
padded_image = np.zeros(shape, dtype=np.float32)
padded_image[:h, :w] = image
input_record["image"] = padded_image
class ConvertImageFromHwcToChw(DetectionAugmentation):
def __init__(self):
super().__init__()
def apply(self, input_record):
input_record["image"] = input_record["image"].transpose((2, 0, 1))
class AnchorTarget2D(DetectionAugmentation):
"""
input: image_meta: tuple(h, w, scale)
           gt_bbox, ndarray(max_num_gt, 5)
output: anchor_label, ndarray(num_anchor * 2, h, w)
anchor_bbox_target, ndarray(num_anchor * 4, h, w)
anchor_bbox_weight, ndarray(num_anchor * 4, h, w)
"""
def __init__(self, pAnchor):
super().__init__()
self.p = pAnchor # type: AnchorTarget2DParam
self.__base_anchor = None
self.__v_all_anchor = None
self.__h_all_anchor = None
self.__num_anchor = None
self.DEBUG = False
@property
def base_anchor(self):
if self.__base_anchor is not None:
return self.__base_anchor
p = self.p
base_anchor = np.array([0, 0, p.generate.stride - 1, self.p.generate.stride - 1])
w = base_anchor[2] - base_anchor[0] + 1
h = base_anchor[3] - base_anchor[1] + 1
x_ctr = base_anchor[0] + 0.5 * (w - 1)
y_ctr = base_anchor[1] + 0.5 * (h - 1)
w_ratios = np.round(np.sqrt(w * h / p.generate.aspects))
h_ratios = np.round(w_ratios * p.generate.aspects)
ws = (np.outer(w_ratios, p.generate.scales)).reshape(-1)
hs = (np.outer(h_ratios, p.generate.scales)).reshape(-1)
base_anchor = np.stack(
[x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)],
axis=1)
self.__base_anchor = base_anchor
return self.__base_anchor
@property
def v_all_anchor(self):
if self.__v_all_anchor is not None:
return self.__v_all_anchor
p = self.p
shift_x = np.arange(0, p.generate.short, dtype=np.float32) * p.generate.stride
shift_y = np.arange(0, p.generate.long, dtype=np.float32) * p.generate.stride
grid_x, grid_y = np.meshgrid(shift_x, shift_y)
grid_x, grid_y = grid_x.reshape(-1), grid_y.reshape(-1)
grid = np.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
all_anchor = grid[:, None, :] + self.base_anchor[None, :, :]
all_anchor = all_anchor.reshape(-1, 4)
self.__v_all_anchor = all_anchor
self.__num_anchor = all_anchor.shape[0]
return self.__v_all_anchor
@property
def h_all_anchor(self):
if self.__h_all_anchor is not None:
return self.__h_all_anchor
p = self.p
shift_x = np.arange(0, p.generate.long, dtype=np.float32) * p.generate.stride
shift_y = np.arange(0, p.generate.short, dtype=np.float32) * p.generate.stride
grid_x, grid_y = np.meshgrid(shift_x, shift_y)
grid_x, grid_y = grid_x.reshape(-1), grid_y.reshape(-1)
grid = np.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
all_anchor = grid[:, None, :] + self.base_anchor[None, :, :]
all_anchor = all_anchor.reshape(-1, 4)
self.__h_all_anchor = all_anchor
self.__num_anchor = all_anchor.shape[0]
return self.__h_all_anchor
@v_all_anchor.setter
def v_all_anchor(self, value):
self.__v_all_anchor = value
self.__num_anchor = value.shape[0]
@h_all_anchor.setter
def h_all_anchor(self, value):
self.__h_all_anchor = value
self.__num_anchor = value.shape[0]
def _assign_label_to_anchor(self, valid_anchor, gt_bbox, neg_thr, pos_thr, min_pos_thr):
num_anchor = valid_anchor.shape[0]
cls_label = np.full(shape=(num_anchor,), fill_value=-1, dtype=np.float32)
if len(gt_bbox) > 0:
# num_anchor x num_gt
overlaps = bbox_overlaps_cython(valid_anchor.astype(np.float32, copy=False), gt_bbox.astype(np.float32, copy=False))
max_overlaps = overlaps.max(axis=1)
argmax_overlaps = overlaps.argmax(axis=1)
gt_max_overlaps = overlaps.max(axis=0)
# TODO: speed up this
# TODO: fix potentially assigning wrong anchors as positive
# A correct implementation is given as
# gt_argmax_overlaps = np.where((overlaps.transpose() == gt_max_overlaps[:, None]) &
# (overlaps.transpose() >= min_pos_thr))[1]
gt_argmax_overlaps = np.where((overlaps == gt_max_overlaps) &
(overlaps >= min_pos_thr))[0]
# anchor class
cls_label[max_overlaps < neg_thr] = 0
# fg label: for each gt, anchor with highest overlap
cls_label[gt_argmax_overlaps] = 1
# fg label: above threshold IoU
cls_label[max_overlaps >= pos_thr] = 1
else:
cls_label[:] = 0
argmax_overlaps = np.zeros(shape=(num_anchor, ))
return cls_label, argmax_overlaps
def _sample_anchor(self, label, num, fg_fraction):
num_fg = int(fg_fraction * num)
fg_inds = np.where(label == 1)[0]
if len(fg_inds) > num_fg:
disable_inds = np.random.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
if self.DEBUG:
disable_inds = fg_inds[:(len(fg_inds) - num_fg)]
label[disable_inds] = -1
num_bg = num - np.sum(label == 1)
bg_inds = np.where(label == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = np.random.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
if self.DEBUG:
disable_inds = bg_inds[:(len(bg_inds) - num_bg)]
label[disable_inds] = -1
def _cal_anchor_target(self, label, valid_anchor, gt_bbox, anchor_label):
num_anchor = valid_anchor.shape[0]
reg_target = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
reg_weight = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
fg_index = np.where(label == 1)[0]
if len(fg_index) > 0:
reg_target[fg_index] = bbox_transform(valid_anchor[fg_index], gt_bbox[anchor_label[fg_index], :4])
reg_weight[fg_index, :] = 1.0
return reg_target, reg_weight
def _gather_valid_anchor(self, image_info):
h, w = image_info[:2]
all_anchor = self.v_all_anchor if h >= w else self.h_all_anchor
allowed_border = self.p.assign.allowed_border
valid_index = np.where((all_anchor[:, 0] >= -allowed_border) &
(all_anchor[:, 1] >= -allowed_border) &
(all_anchor[:, 2] < w + allowed_border) &
(all_anchor[:, 3] < h + allowed_border))[0]
return valid_index, all_anchor[valid_index]
def _scatter_valid_anchor(self, valid_index, cls_label, reg_target, reg_weight):
num_anchor = self.__num_anchor
all_cls_label = np.full(shape=(num_anchor,), fill_value=-1, dtype=np.float32)
all_reg_target = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
all_reg_weight = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
all_cls_label[valid_index] = cls_label
all_reg_target[valid_index] = reg_target
all_reg_weight[valid_index] = reg_weight
return all_cls_label, all_reg_target, all_reg_weight
def apply(self, input_record):
p = self.p
im_info = input_record["im_info"]
gt_bbox = input_record["gt_bbox"]
assert isinstance(gt_bbox, np.ndarray)
assert gt_bbox.dtype == np.float32
valid = np.where(gt_bbox[:, 0] != -1)[0]
gt_bbox = gt_bbox[valid]
if gt_bbox.shape[1] == 5:
gt_bbox = gt_bbox[:, :4]
valid_index, valid_anchor = self._gather_valid_anchor(im_info)
cls_label, anchor_label = \
self._assign_label_to_anchor(valid_anchor, gt_bbox,
p.assign.neg_thr, p.assign.pos_thr, p.assign.min_pos_thr)
self._sample_anchor(cls_label, p.sample.image_anchor, p.sample.pos_fraction)
reg_target, reg_weight = self._cal_anchor_target(cls_label, valid_anchor, gt_bbox, anchor_label)
cls_label, reg_target, reg_weight = \
self._scatter_valid_anchor(valid_index, cls_label, reg_target, reg_weight)
h, w = im_info[:2]
if h >= w:
fh, fw = p.generate.long, p.generate.short
else:
fh, fw = p.generate.short, p.generate.long
input_record["rpn_cls_label"] = cls_label.reshape((fh, fw, -1)).transpose(2, 0, 1).reshape(-1)
input_record["rpn_reg_target"] = reg_target.reshape((fh, fw, -1)).transpose(2, 0, 1)
input_record["rpn_reg_weight"] = reg_weight.reshape((fh, fw, -1)).transpose(2, 0, 1)
return input_record["rpn_cls_label"], \
input_record["rpn_reg_target"], \
input_record["rpn_reg_weight"]
class RenameRecord(DetectionAugmentation):
def __init__(self, mapping):
super().__init__()
self.mapping = mapping
def apply(self, input_record):
for k, new_k in self.mapping.items():
input_record[new_k] = input_record[k]
del input_record[k]
class Loader(mx.io.DataIter):
"""
Loader is now a 3-thread design,
Loader.next is called in the main thread,
multiple worker threads are responsible for performing transform,
a collector thread is responsible for converting numpy array to mxnet array.
"""
def __init__(self, roidb, transform, data_name, label_name, batch_size=1,
shuffle=False, num_worker=None, num_collector=None,
worker_queue_depth=None, collector_queue_depth=None, kv=None, valid_count=-1):
"""
This Iter will provide roi data to Fast R-CNN network
:param roidb: must be preprocessed
:param batch_size:
:param shuffle: bool
:return: Loader
"""
super().__init__(batch_size=batch_size)
if kv:
(self.rank, self.num_worker) = (kv.rank, kv.num_workers)
else:
(self.rank, self.num_worker) = (0, 1)
# data processing utilities
if isinstance(transform, dict):
self.transform = transform["sample"]
self.batch_transform = transform["batch"]
else:
self.transform = transform
self.batch_transform = list()
# save parameters as properties
self.roidb = roidb
self.shuffle = shuffle
# infer properties from roidb
self.total_index = np.arange(len(roidb))
self.valid_count = valid_count if valid_count != -1 else len(roidb)
# decide data and label names
self.data_name = data_name
self.label_name = label_name
# status variable for synchronization between get_data and get_label
self._cur = 0
self.data = None
self.label = None
self.debug = False
self.result = None
# multi-thread settings
self.num_worker = num_worker
self.num_collector = num_collector
self.index_queue = Queue()
self.data_queue = Queue(maxsize=worker_queue_depth)
self.result_queue = Queue(maxsize=collector_queue_depth)
self.workers = None
self.collectors = None
# get first batch to fill in provide_data and provide_label
self._thread_start()
self.load_first_batch()
self.reset()
@property
def index(self):
return self.total_index[:self.valid_count]
@property
def total_record(self):
return len(self.index) // self.batch_size * self.batch_size
@property
def provide_data(self):
return [(k, v.shape) for k, v in zip(self.data_name, self.data)]
@property
def provide_label(self):
return [(k, v.shape) for k, v in zip(self.label_name, self.label)]
def _insert_queue(self):
for i in range(0, len(self.index), self.batch_size):
batch_index = self.index[i:i + self.batch_size]
if len(batch_index) == self.batch_size:
self.index_queue.put(batch_index)
def _thread_start(self):
self.workers = \
[Thread(target=self.worker, args=[self.roidb, self.index_queue, self.data_queue])
for _ in range(self.num_worker)]
for worker in self.workers:
worker.daemon = True
worker.start()
self.collectors = [Thread(target=self.collector, args=[]) for _ in range(self.num_collector)]
for c in self.collectors:
c.daemon = True
c.start()
def reset(self):
self._cur = 0
if self.shuffle:
np.random.shuffle(self.total_index)
self._insert_queue()
def iter_next(self):
return self._cur + self.batch_size <= len(self.index)
def load_first_batch(self):
self.index_queue.put(range(self.batch_size))
self.next()
def load_batch(self):
self._cur += self.batch_size
result = self.result_queue.get()
return result
def next(self):
if self.debug and self.result is not None:
return self.result
if self.iter_next():
# print("[worker] %d" % self.data_queue.qsize())
# print("[collector] %d" % self.result_queue.qsize())
result = self.load_batch()
self.data = result.data
self.label = result.label
self.result = result
return result
else:
raise StopIteration
def worker(self, roidb, index_queue, data_queue):
while True:
batch_index = index_queue.get()
records = []
for index in batch_index:
roi_record = roidb[index].copy()
for trans in self.transform:
trans.apply(roi_record)
records.append(roi_record)
data_batch = {}
for name in self.data_name + self.label_name:
data_batch[name] = np.ascontiguousarray(np.stack([r[name] for r in records]))
for trans in self.batch_transform:
trans.apply(data_batch)
data_queue.put(data_batch)
def collector(self):
while True:
record = self.data_queue.get()
data = [mx.nd.from_numpy(record[name], zero_copy=True) for name in self.data_name]
label = [mx.nd.from_numpy(record[name], zero_copy=True) for name in self.label_name]
provide_data = [(k, v.shape) for k, v in zip(self.data_name, data)]
provide_label = [(k, v.shape) for k, v in zip(self.label_name, label)]
data_batch = mx.io.DataBatch(data=data,
label=label,
provide_data=provide_data,
provide_label=provide_label)
self.result_queue.put(data_batch)
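# --- Illustrative sketch (not part of the original loader) ------------------
# Minimal reproduction of the worker/collector queue pattern used by Loader:
# index batches go into index_q, a worker turns them into numpy-style batches
# on data_q, and a collector converts them into the final payload on result_q.
# Plain Python values stand in for roidb records and mx.nd arrays; the names
# here are placeholders, not part of the real pipeline.
def _demo_threaded_pipeline(num_batches=4):
    from queue import Queue
    from threading import Thread
    index_q, data_q, result_q = Queue(), Queue(maxsize=2), Queue(maxsize=2)
    def worker():
        while True:
            idx = index_q.get()
            if idx is None:
                break
            data_q.put({"batch": idx, "data": [i * i for i in idx]})
    def collector():
        while True:
            rec = data_q.get()
            if rec is None:
                break
            result_q.put(("collected", rec["batch"], sum(rec["data"])))
    Thread(target=worker, daemon=True).start()
    Thread(target=collector, daemon=True).start()
    for b in range(num_batches):
        index_q.put([b, b + 1])
    index_q.put(None)
    results = [result_q.get() for _ in range(num_batches)]
    data_q.put(None)
    return results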
class SequentialLoader(mx.io.DataIter):
def __init__(self, iters):
super().__init__()
self.iters = iters
self.exhausted = [False] * len(iters)
def __getattr__(self, attr):
# delegate unknown keys to underlying iterators
first_non_empty_idx = self.exhausted.index(False)
first_non_empty_iter = self.iters[first_non_empty_idx]
return getattr(first_non_empty_iter, attr)
def next(self):
while True:
if all(self.exhausted):
raise StopIteration
first_non_empty_idx = self.exhausted.index(False)
first_non_empty_iter = self.iters[first_non_empty_idx]
try:
result = first_non_empty_iter.next()
return result
except StopIteration:
self.exhausted[first_non_empty_idx] = True
def reset(self):
for it in self.iters:
it.reset()
self.exhausted = [False] * len(self.iters)
@property
def provide_data(self):
return self.iters[0].provide_data
@property
def provide_label(self):
return self.iters[0].provide_label
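# --- Illustrative sketch (not part of the original loader) ------------------
# SequentialLoader drains its children in order: keep pulling from the first
# non-exhausted iterator and mark it exhausted on StopIteration. The helper
# below mirrors that control flow with plain Python iterators so it can be run
# without mxnet, e.g. _demo_sequential_drain([[1, 2], [3]]) returns [1, 2, 3].
def _demo_sequential_drain(iterables):
    iters = [iter(it) for it in iterables]
    exhausted = [False] * len(iters)
    drained = []
    while not all(exhausted):
        first_idx = exhausted.index(False)
        try:
            drained.append(next(iters[first_idx]))
        except StopIteration:
            exhausted[first_idx] = True
    return drained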
class AnchorLoader(mx.io.DataIter):
def __init__(self, roidb, transform, data_name, label_name, batch_size=1,
shuffle=False, num_worker=12, num_collector=4, worker_queue_depth=4,
collector_queue_depth=4, kv=None):
super().__init__(batch_size=batch_size)
v_roidb, h_roidb = self.roidb_aspect_group(roidb)
if kv:
rank, num_rank = kv.rank, kv.num_workers
else:
rank, num_rank = 0, 1
        if num_rank > 1:
            v_part = len(v_roidb) // num_rank
            v_remain = len(v_roidb) % num_rank
            v_roidb_part = v_roidb[rank * v_part:(rank + 1) * v_part]
            v_valid_count = len(v_roidb_part)
            if v_remain > 0:
                # ranks 0..v_remain-1 each take one leftover record; without this
                # guard, v_roidb[-0:] would be the whole list and every rank would
                # append a spurious duplicate record
                v_roidb_part += v_roidb[-v_remain:][rank:rank + 1]
            h_part = len(h_roidb) // num_rank
            h_remain = len(h_roidb) % num_rank
            h_roidb_part = h_roidb[rank * h_part:(rank + 1) * h_part]
            h_valid_count = len(h_roidb_part)
            if h_remain > 0:
                h_roidb_part += h_roidb[-h_remain:][rank:rank + 1]
else:
v_roidb_part = v_roidb
v_valid_count = len(v_roidb)
h_roidb_part = h_roidb
h_valid_count = len(h_roidb)
loaders = []
if len(h_roidb_part) >= batch_size:
h_loader = Loader(roidb=h_roidb_part,
valid_count=h_valid_count,
transform=transform,
data_name=data_name,
label_name=label_name,
batch_size=batch_size,
shuffle=shuffle,
num_worker=num_worker,
num_collector=num_collector,
worker_queue_depth=worker_queue_depth,
collector_queue_depth=collector_queue_depth,
kv=kv)
loaders.append(h_loader)
if len(v_roidb_part) >= batch_size:
v_loader = Loader(roidb=v_roidb_part,
valid_count=v_valid_count,
transform=transform,
data_name=data_name,
label_name=label_name,
batch_size=batch_size,
shuffle=shuffle,
num_worker=num_worker,
num_collector=num_collector,
worker_queue_depth=worker_queue_depth,
collector_queue_depth=collector_queue_depth,
kv=kv)
loaders.append(v_loader)
assert len(loaders) > 0, "at least one loader should be constructed"
self.__loader = SequentialLoader(loaders)
@property
def total_record(self):
return sum([it.total_record for it in self.__loader.iters])
def __len__(self):
return self.total_record
def __getattr__(self, attr):
# delegate unknown keys to underlying iterators
return getattr(self.__loader, attr)
def next(self):
return self.__loader.next()
def reset(self):
return self.__loader.reset()
@staticmethod
def roidb_aspect_group(roidb):
v_roidb, h_roidb = [], []
for roirec in roidb:
if roirec["h"] >= roirec["w"]:
v_roidb.append(roirec)
else:
h_roidb.append(roirec)
return v_roidb, h_roidb
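# --- Illustrative sketch (not part of the original loader) ------------------
# AnchorLoader splits each aspect group evenly across ranks and hands at most
# one leftover record to the low-numbered ranks. The helper below reproduces
# that arithmetic on a plain list; "records", "rank" and "num_rank" are
# placeholders, not pipeline objects. For example,
# _demo_partition_roidb(list(range(10)), rank=0, num_rank=3) returns
# ([0, 1, 2, 9], 3): three "valid" records plus one padding record.
def _demo_partition_roidb(records, rank, num_rank):
    part = len(records) // num_rank
    remain = len(records) % num_rank
    chunk = records[rank * part:(rank + 1) * part]
    valid_count = len(chunk)
    if remain > 0:
        # ranks 0..remain-1 each receive one of the leftover records
        chunk = chunk + records[-remain:][rank:rank + 1]
    return chunk, valid_count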
def visualize_anchor_loader(batch_data):
image = batch_data.data[0][0].asnumpy().astype(np.uint8).transpose((1, 2, 0)).copy()
gt_bbox = batch_data.data[2][0].asnumpy().astype(np.int32)
for box in gt_bbox:
cv2.rectangle(image, tuple(box[:2]), tuple(box[2:4]), color=(0, 255, 0))
cv2.imshow("image", image)
cv2.waitKey()
def visualize_anchor_loader_old(batch_data):
image = batch_data.data[0][0].asnumpy().astype(np.uint8).transpose((1, 2, 0)).copy()
gt_bbox = batch_data.label[3][0].asnumpy().astype(np.int32)
for box in gt_bbox:
cv2.rectangle(image, tuple(box[:2]), tuple(box[2:4]), color=(0, 255, 0))
cv2.imshow("image", image)
cv2.waitKey()
def visualize_original_input(roirec):
image = cv2.imread(roirec["image_url"], cv2.IMREAD_COLOR)
gt_bbox = roirec["gt_bbox"]
for box in gt_bbox:
cv2.rectangle(image, tuple(box[:2]), tuple(box[2:4]), color=(0, 255, 0))
cv2.imshow("image", image)
cv2.waitKey()
|
n1ql_fts_integration_phase2.py
|
from .tuq import QueryTests
from membase.api.exception import CBQError
from lib.membase.api.rest_client import RestConnection
from pytests.fts.fts_base import CouchbaseCluster
from remote.remote_util import RemoteMachineShellConnection
import json
from pytests.security.rbac_base import RbacBase
from lib.remote.remote_util import RemoteMachineShellConnection
import threading
class N1qlFTSIntegrationPhase2Test(QueryTests):
users = {}
def suite_setUp(self):
super(N1qlFTSIntegrationPhase2Test, self).suite_setUp()
def setUp(self):
super(N1qlFTSIntegrationPhase2Test, self).setUp()
self._load_test_buckets()
self.log.info("============== N1qlFTSIntegrationPhase2Test setup has started ==============")
self.log_config_info()
self.log.info("============== N1qlFTSIntegrationPhase2Test setup has completed ==============")
def tearDown(self):
self.log.info("============== N1qlFTSIntegrationPhase2Test tearDown has started ==============")
self.log_config_info()
self.log.info("============== N1qlFTSIntegrationPhase2Test tearDown has completed ==============")
super(N1qlFTSIntegrationPhase2Test, self).tearDown()
if self.get_bucket_from_name("beer-sample"):
self.delete_bucket("beer-sample")
def suite_tearDown(self):
self.log.info("============== N1qlFTSIntegrationPhase2Test suite_tearDown has started ==============")
self.log_config_info()
self.log.info("============== N1qlFTSIntegrationPhase2Test suite_tearDown has completed ==============")
super(N1qlFTSIntegrationPhase2Test, self).suite_tearDown()
# ======================== tests =====================================================
def test_keyspace_alias_single_bucket(self):
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
self._create_fts_index(index_name='idx_beer_sample_fts', doc_count=7303, source_name='beer-sample')
bucket_names = {
"no_alias": "`beer-sample`",
"alias": "`beer-sample` t",
"as_alias": "`beer-sample` as t"
}
test_name = self.input.param("test_name", '')
bucket_name = bucket_names[test_name]
aliases = []
if bucket_name == "`beer-sample`":
aliases = ["", "`beer-sample`"]
else:
aliases = ["t"]
try:
for alias in aliases:
dot = ""
if alias!="":
dot="."
fts_query = "select "+str(alias)+str(dot)+"code, "+str(alias)+str(dot)+"state from "+str(bucket_name)+" " \
"where "+str(alias)+str(dot)+"type='brewery' and SEARCH("+alias+dot+"state, {'query':{'field': 'state', 'match': 'California'}, 'size': 10000}) order by "+str(alias)+str(dot)+"code"
n1ql_query = "select code, state from `beer-sample` where type='brewery' and state like '%California%' order by code"
fts_results = self.run_cbq_query(fts_query)['results']
n1ql_results = self.run_cbq_query(n1ql_query)['results']
self.assertEqual(fts_results, n1ql_results, "Incorrect query : "+str(fts_query))
finally:
self._remove_all_fts_indexes()
def test_keyspace_alias_two_buckets(self):
        # TODO:
        # Add tests for 2 SEARCH() calls
        # Size 20 limit 10
        # Index UUID
        # Covering / non-covering gsi, at least 2 fields
        # option - index can be specified in 2 ways: string, object.
        # 2 keyspaces in select, no specs in search() - should fail with an appropriate error message.
test_name = self.input.param("test_name", '')
if test_name == '':
raise Exception("Invalid test configuration! Test name should not be empty.")
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
test_cases = {
'test_t1_t2_1': {"bucket1_alias": "t1", "bucket2_alias": "t2", "keyspace_param": "t1"},
'test_t1_t2_2': {"bucket1_alias": "t1", "bucket2_alias": "t2", "keyspace_param": "`t1`"},
'test_t1_t2_3': {"bucket1_alias": "t1", "bucket2_alias": "t2", "keyspace_param": "`t1`.state"},
'test_t1_t2_4': {"bucket1_alias": "t1", "bucket2_alias": "t2", "keyspace_param": "t1.`state`"},
'test_t1_t2_5': {"bucket1_alias": "t1", "bucket2_alias": "t2", "keyspace_param": "`t1`.`state`"},
'test_ast1_t2_1': {"bucket1_alias": "as t1", "bucket2_alias": "t2", "keyspace_param": "t1"},
'test_ast1_t2_2': {"bucket1_alias": "as t1", "bucket2_alias": "t2", "keyspace_param": "`t1`"},
'test_ast1_t2_3': {"bucket1_alias": "as t1", "bucket2_alias": "t2", "keyspace_param": "`t1`.state"},
'test_ast1_t2_4': {"bucket1_alias": "as t1", "bucket2_alias": "t2", "keyspace_param": "t1.`state`"},
'test_ast1_t2_5': {"bucket1_alias": "as t1", "bucket2_alias": "t2", "keyspace_param": "`t1`.`state`"},
'test_t1_ast2_1': {"bucket1_alias": "t1", "bucket2_alias": "as t2", "keyspace_param": "t1"},
'test_t1_ast2_2': {"bucket1_alias": "t1", "bucket2_alias": "as t2", "keyspace_param": "`t1`"},
'test_t1_ast2_3': {"bucket1_alias": "t1", "bucket2_alias": "as t2", "keyspace_param": "`t1`.state"},
'test_t1_ast2_4': {"bucket1_alias": "t1", "bucket2_alias": "as t2", "keyspace_param": "t1.`state`"},
'test_t1_ast2_5': {"bucket1_alias": "t1", "bucket2_alias": "as t2", "keyspace_param": "`t1`.`state`"},
'test_ast1_ast2_1': {"bucket1_alias": "as t1", "bucket2_alias": "as t2", "keyspace_param": "t1"},
'test_ast1_ast2_2': {"bucket1_alias": "as t1", "bucket2_alias": "as t2", "keyspace_param": "`t1`"},
'test_ast1_ast2_3': {"bucket1_alias": "as t1", "bucket2_alias": "as t2", "keyspace_param": "`t1`.state"},
'test_ast1_ast2_4': {"bucket1_alias": "as t1", "bucket2_alias": "as t2", "keyspace_param": "t1.`state`"},
'test_ast1_ast2_5': {"bucket1_alias": "as t1", "bucket2_alias": "as t2", "keyspace_param": "`t1`.`state`"},
}
self._create_fts_index(index_name='idx_beer_sample_fts', doc_count=7303, source_name='beer-sample')
bucket1_alias = test_cases[test_name]["bucket1_alias"]
bucket2_alias = test_cases[test_name]["bucket2_alias"]
keyspace_alias = test_cases[test_name]['keyspace_param']
if not self.is_index_present("beer-sample", "idx_brewery_id"):
self.run_cbq_query("create index idx_brewery_id on `beer-sample`(brewery_id)")
if not self.is_index_present("beer-sample", "idx_type"):
self.run_cbq_query("create index idx_type on `beer-sample`(type)")
if not self.is_index_present("beer-sample", "idx_code"):
self.run_cbq_query("create index idx_code on `beer-sample`(code)")
self.wait_for_all_indexes_online()
fts_query = "select t1.code, t1.state, t1.city, t2.name from `beer-sample` "+bucket1_alias+ \
" inner join `beer-sample` "+bucket2_alias+" on t1.code=t2.brewery_id where t1.type='brewery' and t2.type='beer' " \
"and SEARCH("+keyspace_alias+", 'state:California') order by t1.code, t2.name"
n1ql_query = "select t1.code, t1.state, t1.city, t2.name from `beer-sample` t1 inner join " \
"`beer-sample` t2 on t1.code=t2.brewery_id where t1.type='brewery' " \
" and t2.type='beer' and t1.state like '%California%' order by t1.code, t2.name"
fts_results = None
n1ql_results = None
try:
fts_results = self.run_cbq_query(fts_query)['results']
n1ql_results = self.run_cbq_query(n1ql_query)['results']
        except CBQError as err:
            self._remove_all_fts_indexes()
            raise Exception("Query: " + fts_query + " failed: " + str(err))
self._remove_all_fts_indexes()
self.drop_index_safe('beer-sample', 'idx_brewery_id')
self.drop_index_safe('beer-sample', 'idx_type')
self.drop_index_safe('beer-sample', 'idx_code')
self.assertEqual(fts_results, n1ql_results, "Incorrect query : "+str(fts_query))
def test_keyspace_alias_two_buckets_negative(self):
test_name = self.input.param("test_name", '')
if test_name == '':
raise Exception("Invalid test configuration! Test name should not be empty.")
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
test_cases = {
"test1": {"bucket_name": "`beer-sample` t1", "search_alias": "`state`"},
"test2": {"bucket_name": "`beer-sample` t1", "search_alias": "state"},
"test3": {"bucket_name": "`beer-sample` as t1", "search_alias": "state"},
"test4": {"bucket_name": "`beer-sample` as t1", "search_alias": "`state`"},
}
self._create_fts_index(index_name='idx_beer_sample_fts', doc_count=7303, source_name='beer-sample')
bucket_name = test_cases[test_name]["bucket_name"]
search_alias = test_cases[test_name]["search_alias"]
fts_query = "select t1.code, t1.state, t1.city, t2.name from "+bucket_name+" inner join `beer-sample` t2 on t1.code=t2.brewery_id " \
"where t1.type='brewery' and t2.type='beer' and SEARCH("+search_alias+", 'state:California') order by t2.name"
try:
self.run_cbq_query(fts_query)
except CBQError as err:
self._remove_all_fts_indexes()
self.assertTrue("Ambiguous reference to field" in str(err), "Unexpected error message is found - "+str(err))
self._remove_all_fts_indexes()
def test_keyspace_alias_1_bucket_negative(self):
test_name = self.input.param("test_name", '')
if test_name == '':
raise Exception("Invalid test configuration! Test name should not be empty.")
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
test_cases = {
"star": "t.state[*]",
"object_values": "OBJECT_VALUES(t.state)",
"array": "t.[state]"
}
search_alias = test_cases[test_name]
fts_query = "select t.code, t.state from `beer-sample` t where t.type='brewery' and SEARCH("+search_alias+", 'France') order by t.code"
try:
self.run_cbq_query(fts_query)
except CBQError as ex:
self._remove_all_fts_indexes()
if test_name in ['star', 'object_values', 'array']:
self.assertTrue("SEARCH() function operands are invalid." in str(ex),
"Unexpected error message is found - " + str(ex))
else:
self.assertTrue("Ambiguous reference to field" in str(ex), "Unexpected error message is found - "+str(ex))
self._remove_all_fts_indexes()
def test_search_options_index_name(self):
test_name = self.input.param("test_name", '')
if test_name == '':
raise Exception("Invalid test configuration! Test name should not be empty.")
test_cases = {
"index_not_exists": {
"expected_result": "success",
"index_in_explain": "beer_primary"
},
"single_fts_index": {
"expected_result": "success",
"index_in_explain": "idx_beer_sample_fts"
},
"two_fts_indexes": {
"expected_result": "success",
"index_in_explain": "idx_beer_sample_fts"
},
"fts_index_is_not_optimal": {
"expected_result": "success",
"index_in_explain": "idx_beer_sample_fts"
}
}
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
fts_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"field\": \"state\", \"match\":\"California\"}, {\"index\":\"idx_beer_sample_fts\"})"
self._create_fts_index(index_name='idx_beer_sample_fts', doc_count=7303, source_name='beer-sample')
if test_name == "index_not_exists":
self._delete_fts_index(index_name="idx_beer_sample_fts")
else:
if test_name == "two_fts_indexes":
self._create_fts_index(index_name='idx_beer_sample_fts_1', doc_count=7303, source_name='beer-sample')
elif test_name == "fts_index_is_not_optimal":
more_suitable_index = self._create_fts_index(index_name='idx_beer_sample_fts_name', doc_count=7303,
source_name='beer-sample')
more_suitable_index.add_child_field_to_default_mapping(field_name="name", field_type="text")
more_suitable_index.index_definition['uuid'] = more_suitable_index.get_uuid()
more_suitable_index.update()
if test_cases[test_name]["expected_result"] == "fail":
result = self.run_cbq_query(fts_query)
self.assertEqual(result['status'], "errors", "Running SEARCH() query without fts index is successful. Should be failed.")
elif test_cases[test_name]["expected_result"] == "success":
result = self.run_cbq_query("explain " + fts_query)
self._remove_all_fts_indexes()
self.assertEqual(result['results'][0]['plan']['~children'][0]['index'], test_cases[test_name]["index_in_explain"])
self._remove_all_fts_indexes()
# 10 results problem
def test_search_options(self):
#todo: have more than one search() call, play with search_meta() and search_score()
test_name = self.input.param("test_name", '')
if test_name == '':
raise Exception("Invalid test configuration! Test name should not be empty.")
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
test_cases = {
# 10 results
"explain": ["true", "false"],
# 10 results
"fields": ["[\"*\"]", "[\"name\"]"],
# 10 results
"highlight": ["{\"style\":\"html\", \"fields\":[\"*\"]}", "{\"style\":\"html\", \"fields\":[\"name\"]}", "{\"style\":\"ansi\", \"fields\":[\"name\"]}", "{\"style\":\"ansi\", \"fields\":[\"*\"]}"],
# 10 results
"analyzer": ["{\"match\": \"California\", \"field\": \"state\", \"analyzer\": \"standard\"}", "{\"match\": \"California\", \"field\": \"state\", \"analyzer\": \"html\"}"],
# MB-34005
"size": [10, 100],
# 10 results
"sort": ["[{\"by\": \"field\", \"field\": \"name\", \"mode\":\"max\", \"missing\": \"last\"}]"],
}
for option_val in test_cases[test_name]:
self._create_fts_index(index_name='idx_beer_sample_fts', doc_count=7303, source_name='beer-sample')
n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\": \"state\", \"match\":\"California\"}, \"size\": 10000, \"sort\": [\"_id\"]}, {\""+test_name+"\": "+str(option_val)+"})"
if test_name == "size":
n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\": \"state\", \"match\":\"California\"}, \"sort\": [\"_id\"], \""+test_name+"\":"+ str(option_val)+"})"
if test_name == 'size':
fts_request_str = "{\"query\":{\"field\": \"state\", \"match\":\"California\"}, \"size\":"+str(option_val)+ ", \"sort\": [\"_id\"]}"
else:
fts_request_str = "{\"query\":{\"field\": \"state\", \"match\":\"California\"}, \"sort\": [\"_id\"], \"size\":10000, \""+test_name+"\":"+str(option_val)+"}"
fts_request = json.loads(fts_request_str)
n1ql_results = self.run_cbq_query(n1ql_query)['results']
total_hits, hits, took, status = \
rest.run_fts_query(index_name="idx_beer_sample_fts",
query_json=fts_request)
comparison_result = self._compare_n1ql_results_against_fts(n1ql_results, hits)
self._remove_all_fts_indexes()
self.assertEqual(comparison_result, "OK", comparison_result)
def test_use_index_hint(self):
test_name = self.input.param("test_name", '')
if test_name == '':
raise Exception("Invalid test configuration! Test name should not be empty.")
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
test_cases = {
"fts_index_exists": {
"hint_content" : "idx_beer_sample_fts USING FTS",
"expected_result" : "positive",
"options_content": ""
},
"fts_index_does_not_exist": {
"hint_content" : "idx_beer_sample_fts_fake USING FTS",
"expected_result" : "negative",
"options_content": ""
},
"fts_index_busy": {
"hint_content" : "idx_beer_sample_fts USING FTS",
"expected_result" : "positive",
"options_content": ""
},
"fts_gsi_indexes_use": {
"hint_content" : "idx_beer_sample_fts USING FTS, beer_primary using GSI",
"expected_result" : "positive",
"options_content": ""
},
"same_hint_options": {
"hint_content" : "idx_beer_sample_fts USING FTS",
"expected_result" : "positive",
"options_content" : ", {\"index\":\"idx_beer_sample_fts\"}"
},
"not_same_hint_options": {
"hint_content" : "idx_beer_sample_fts USING FTS",
"expected_result" : "positive",
"options_content" : ", {\"index\":\"idx_beer_sample_fts_1\"}"
},
"hint_good_options_bad": {
"hint_content": "idx_beer_sample_fts USING FTS",
"expected_result": "negative",
"options_content": ", {\"index\":\"idx_beer_sample_fts_fake\"}"
},
"hint_bad_options_good": {
"hint_content": "idx_beer_sample_fts_fake USING FTS",
"expected_result": "negative",
"options_content": ", {\"index\":\"idx_beer_sample_fts\"}"
},
"hint_bad_options_bad": {
"hint_content": "idx_beer_sample_fts_fake USING FTS",
"expected_result": "negative",
"options_content": ", {\"index\":\"idx_beer_sample_fts_fake\"}"
},
}
try:
test_results = {}
test_passed = True
negatives_expected = 0
negatives_found = 0
self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
if test_name == "not_same_hint_options":
self._create_fts_index(index_name='idx_beer_sample_fts_1', doc_count=7303, source_name='beer-sample')
test_case_dict = test_cases[test_name]
options_content = test_case_dict['options_content']
if test_case_dict['expected_result'] == "negative":
negatives_expected = 1
n1ql_query = "select meta().id from `beer-sample` USE INDEX ("+test_case_dict['hint_content']+") " \
"where search(`beer-sample`, {\"field\": \"state\", \"match\":\"California\"}"+options_content+")"
try:
n1ql_explain_query = "explain " + n1ql_query
self.run_cbq_query(n1ql_query)
result = self.run_cbq_query(n1ql_explain_query)
if test_name == "not_same_hint_options":
self.assertTrue(result['results'][0]['plan']['~children'][0]['index'] in ["idx_beer_sample_fts", "idx_beer_sample_fts_1"])
else:
self.assertEqual(result['results'][0]['plan']['~children'][0]['index'], "idx_beer_sample_fts")
except CBQError as e:
negatives_found = 1
test_passed = False
test_results[test_name] = test_passed
if test_name == "not_same_hint_options":
self._delete_fts_index(index_name='idx_beer_sample_fts_1')
finally:
self._remove_all_fts_indexes()
self.assertEqual(negatives_found, negatives_expected, "Some test case results differ from expected.")
def test_index_selection(self):
# gsi indexes - primary, secondary - field, seconadary - field,field
# fts indexes - default, field, type->field
test_name = self.input.param("test_name", '')
if test_name == '':
raise Exception("Invalid test configuration! Test name should not be empty.")
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
if not self.is_index_present("beer-sample", "idx_state"):
self.run_cbq_query("create index idx_state on `beer-sample`(state)")
if not self.is_index_present("beer-sample", "idx_state_city"):
self.run_cbq_query("create index idx_state_city on `beer-sample`(state, city)")
self.wait_for_all_indexes_online()
more_suitable_fts_index = self._create_fts_index(index_name='idx_beer_sample_fts_name', doc_count=7303, source_name='beer-sample')
more_suitable_fts_index.add_child_field_to_default_mapping(field_name="state", field_type="text")
more_suitable_fts_index.index_definition['uuid'] = more_suitable_fts_index.get_uuid()
more_suitable_fts_index.update()
test_cases = {
# index specified in SEARCH() or in USE INDEX hint must be used
"use_index_fts":{
"query": "explain select meta().id from `beer-sample` USE INDEX (idx_beer_sample_fts using fts) where search(`beer-sample`, {\"field\": \"state\", \"match\":\"California\"})",
"index": "idx_beer_sample_fts"
},
# MB-33677
"use_index_gsi": {
"query": "explain select meta().id from `beer-sample` USE INDEX (idx_state_city using gsi) where search(`beer-sample`, {\"field\": \"state\", \"match\":\"California\"}) and state='California'",
"index": "idx_state_city"
},
"search_hint":{
"query": "explain select meta().id from `beer-sample` where search(`beer-sample`, {\"field\": \"state\", \"match\":\"California\"}, {\"index\":\"idx_beer_sample_fts\"})",
"index": "idx_beer_sample_fts"
},
"shortest_fts": {
"query": "explain select meta().id from `beer-sample` where search(`beer-sample`, {\"field\": \"state\", \"match\":\"California\"})",
"index": "idx_beer_sample_fts_name"
},
"shortest_gsi":{
"query": "explain select meta().id from `beer-sample` where search(`beer-sample`, {\"field\": \"name\", \"match\":\"California\"})",
"index": "idx_name"
},
"primary_gsi":{
"query": "explain select meta().id from `beer-sample` where search(`beer-sample`, {\"field\": \"category\", \"match\":\"British\"})",
"index": "PrimaryScan3"
}
}
if test_name == "shortest_gsi":
self._delete_fts_index("idx_beer_sample_fts")
if not self.is_index_present("beer-sample", "idx_name"):
self.run_cbq_query("create index idx_name on `beer-sample`(name)")
if not self.is_index_present("beer-sample", "idx_state_name"):
self.run_cbq_query("create index idx_state_name on `beer-sample`(state, name)")
self.wait_for_all_indexes_online()
if test_name == "primary_gsi":
self._delete_fts_index("idx_beer_sample_fts")
n1ql_query = test_cases[test_name]["query"]
result = self.run_cbq_query(n1ql_query)
if test_name in["primary_gsi", "shortest_gsi"] :
self.assertEqual(result['results'][0]['plan']['~children'][0]['#operator'], "PrimaryScan3")
else:
self.assertEqual(result['results'][0]['plan']['~children'][0]['index'], test_cases[test_name]["index"])
self._remove_all_fts_indexes()
self.drop_index_safe('beer-sample', 'idx_state')
self.drop_index_safe('beer-sample', 'idx_state_city')
self.drop_index_safe('beer-sample', 'idx_name')
self.drop_index_safe('beer-sample', 'idx_state_name')
# 10 results
def test_logical_predicates(self):
test_cases = [" = true ", " in [true] ", " in [true, true, true] "]
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
fts_request = {"query": {"field": "state", "match": "California"}, "size": 10000}
for test_case in test_cases:
n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"field\": \"state\", \"match\":\"California\"}) "+ test_case
n1ql_results = self.run_cbq_query(n1ql_query)['results']
total_hits, hits, took, status = \
rest.run_fts_query(index_name="idx_beer_sample_fts",
query_json=fts_request)
comparison_results = self._compare_n1ql_results_against_fts(n1ql_results, hits)
self.assertEqual(comparison_results, "OK", comparison_results)
n1ql_query = "select meta().id from `beer-sample` where not(not(search(`beer-sample`, {\"field\": \"state\", \"match\":\"California\"}))) "
n1ql_results = self.run_cbq_query(n1ql_query)['results']
total_hits, hits, took, status = rest.run_fts_query(index_name="idx_beer_sample_fts", query_json=fts_request)
comparison_results = self._compare_n1ql_results_against_fts(n1ql_results, hits)
self.assertEqual(comparison_results, "OK", comparison_results)
self._remove_all_fts_indexes()
def test_logical_predicates_negative(self):
test_cases = {
"case_1":{
"predicate": " = false ",
"verification_query": "select meta().id from `beer-sample` where state is missing or state!='California'"
},
"case_2": {
"predicate": " !=false ",
"verification_query": "select meta().id from `beer-sample` where state = 'California'"
},
"case_3": {
"predicate": " in [false] ",
"verification_query": "select meta().id from `beer-sample` where state is missing or state != 'California'"
},
"case_4": {
"predicate": " in [true, 1, 2] ",
"verification_query": "select meta().id from `beer-sample` where state = 'California'"
},
"case_5": {
"predicate": " not in [false] ",
"verification_query": "select meta().id from `beer-sample` where state = 'California'"
},
}
test_name = self.input.param("test_name", "")
if test_name == "":
raise Exception("Test name cannot be empty.")
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
if test_name != 'special_case':
predicate = test_cases[test_name]['predicate']
verification_query = test_cases[test_name]['verification_query']
search_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"field\": \"state\", \"match\":\"California\"}) "+ predicate
else:
search_query = "select meta().id from `beer-sample` where not(search(`beer-sample`, {\"field\": \"state\", \"match\":\"California\"})) "
verification_query = "select meta().id from `beer-sample` where state is missing or state != 'California'"
search_results = self.run_cbq_query(search_query)['results']
verification_results = self.run_cbq_query(verification_query)['results']
search_doc_ids = []
for result in search_results:
search_doc_ids.append(result['id'])
verification_doc_ids = []
for result in verification_results:
verification_doc_ids.append(result['id'])
        self.assertEqual(len(search_doc_ids), len(verification_doc_ids),
                         "Results count does not match for test " + test_name + ". SEARCH() - " + str(
                             len(search_doc_ids)) + ", Verification - " + str(len(verification_doc_ids)))
        self.assertEqual(sorted(search_doc_ids), sorted(verification_doc_ids),
                         "Found mismatch in results for test " + test_name + ".")
self._remove_all_fts_indexes()
def test_n1ql_syntax_select_from_let(self):
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
n1ql_query = "select meta().id from `beer-sample` let res=true where search(`beer-sample`, {\"query\":{\"field\": \"state\", \"match\":\"California\"},\"size\":10000})=res"
fts_request = {"query":{"field": "state", "match":"California"}, "size":10000}
n1ql_results = self.run_cbq_query(n1ql_query)['results']
total_hits, hits, took, status = rest.run_fts_query(index_name="idx_beer_sample_fts",
query_json=fts_request)
comparison_results = self._compare_n1ql_results_against_fts(n1ql_results, hits)
self.assertEqual(comparison_results, "OK", comparison_results)
self._remove_all_fts_indexes()
def test_n1ql_syntax_select_from_2_buckets(self):
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
if not self.is_index_present("beer-sample", "idx_state"):
self.run_cbq_query("create index idx_state on `beer-sample`(state)")
if not self.is_index_present("beer-sample", "idx_city"):
self.run_cbq_query("create index idx_city on `beer-sample`(city)")
self.wait_for_all_indexes_online()
n1ql_query = "select `beer-sample`.id, `beer-sample`.country, `beer-sample`.city, t2.name from `beer-sample` " \
"inner join `beer-sample` t2 on `beer-sample`.state=t2.state and `beer-sample`.city=t2.city " \
"where SEARCH(`beer-sample`, 'state:California')"
n1ql_results = self.run_cbq_query(n1ql_query)
self.assertEqual(n1ql_results['status'], 'success')
self._remove_all_fts_indexes()
self.drop_index_safe('beer-sample', 'idx_state')
self.drop_index_safe('beer-sample', 'idx_city')
def test_n1ql_syntax_select_from_double_search_call(self):
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
if not self.is_index_present("beer-sample", "idx_state"):
self.run_cbq_query("create index idx_state on `beer-sample`(state)")
if not self.is_index_present("beer-sample", "idx_city"):
self.run_cbq_query("create index idx_city on `beer-sample`(city)")
self.wait_for_all_indexes_online()
n1ql_query = "select `beer-sample`.id, `beer-sample`.country, `beer-sample`.city, t2.name from `beer-sample` " \
"inner join `beer-sample` t2 on `beer-sample`.state=t2.state and `beer-sample`.city=t2.city " \
"where SEARCH(t2, 'state:California') and SEARCH(`beer-sample`, 'state:California')"
n1ql_results = self.run_cbq_query(n1ql_query)
self.assertEqual(n1ql_results['status'], 'success')
self._remove_all_fts_indexes()
self.drop_index_safe('beer-sample', 'idx_state')
self.drop_index_safe('beer-sample', 'idx_city')
def test_n1ql_syntax_from_select(self):
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
n1ql_query = "from (select meta().id mt from `beer-sample` where search(`beer-sample`, 'state:California')) as t select t.mt as id"
fts_request = {"query":{"field": "state", "match":"California"}, "size":10000}
n1ql_results = self.run_cbq_query(n1ql_query)['results']
total_hits, hits, took, status = rest.run_fts_query(index_name="idx_beer_sample_fts",
query_json=fts_request)
comparison_results = self._compare_n1ql_results_against_fts(n1ql_results, hits)
self.assertEqual(comparison_results, "OK", comparison_results)
self._remove_all_fts_indexes()
#MB - 34007
def test_n1ql_syntax_union_intersect_except(self):
test_cases = {
"same_buckets_same_idx": {
"query_left": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California') ",
"query_right": " select meta().id from `beer-sample` where search(`beer-sample`, 'state:Georgia')"
},
"same_buckets_different_idx": {
"query_left": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California') ",
"query_right": " select meta().id from `beer-sample` where search(`beer-sample`, 'name:Amendment')"
},
"different_buckets_different_idx": {
"query_left": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California') ",
"query_right": " select meta().id from `default` where search(`default`, 'job_title:Engeneer')"
}
}
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
fts_name_index = self._create_fts_index(index_name="idx_beer_sample_fts_name", doc_count=7303, source_name='beer-sample')
fts_name_index.add_child_field_to_default_mapping(field_name="name", field_type="text")
fts_name_index.index_definition['uuid'] = fts_name_index.get_uuid()
fts_name_index.update()
fts_state_index = self._create_fts_index(index_name="idx_beer_sample_fts_state", doc_count=7303, source_name='beer-sample')
fts_state_index.add_child_field_to_default_mapping(field_name="state", field_type="text")
fts_state_index.index_definition['uuid'] = fts_state_index.get_uuid()
fts_state_index.update()
#fts_job_index = self._create_fts_index(index_name="idx_default_fts_job_title", doc_count=2016, source_name='default')
union_intersect_except = [" union ", " intersect ", " except "]
test_name = self.input.param("test_name", '')
for uie in union_intersect_except:
full_results = self.run_cbq_query(test_cases[test_name]['query_left']+uie+test_cases[test_name]['query_right'])['results']
left_results = self.run_cbq_query(test_cases[test_name]['query_left'])['results']
right_results = self.run_cbq_query(test_cases[test_name]['query_right'])['results']
left_right_results = []
if uie == ' union ':
left_right_results = left_results
for r in right_results:
if r not in left_right_results:
left_right_results.append(r)
elif uie == ' intersect ':
for r in left_results:
if r in right_results and r not in left_right_results:
left_right_results.append(r)
elif uie == ' except ':
for r in left_results:
if r not in right_results:
left_right_results.append(r)
self.assertEqual(len(full_results), len(left_right_results),
"Results count does not match for test "+test_name+", operation - "+uie+". Full query - " + str(
len(full_results)) + ", sum of 2 queries - " + str(len(left_right_results)))
self.assertEqual(sorted(full_results), sorted(left_right_results),
"Found mismatch in results for test "+test_name+", operation - "+uie+".")
self._remove_all_fts_indexes()
def test_prepareds(self):
test_name = self.input.param("test_name", '')
if test_name == '':
raise Exception("Invalid test configuration! Test name should not be empty.")
test_cases = {
"simple_prepared": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California')",
"params": "",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California')",
"expected_result": "success"
},
# MB-33724
"named_prepared_query_definition": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, $state_val)",
"params": "$state_val=\"state:California\"",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California')",
"expected_result": "success"
},
# MB-33724
"named_prepared_option_index_name": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', $idx_name)",
"params": "$idx_name={\"index\": \"idx_beer_sample_fts\"}",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', {'index': 'idx_beer_sample_fts'})",
"expected_result": "success"
},
"named_prepared_option_settings": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\": \"state\", \"match\":\"California\"},'size': $size, \"sort\":[\"_id\"]})",
"params": "$size=15",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\": \"state\", \"match\":\"California\"},'size': 15, 'sort':['_id']})",
"expected_result": "success"
},
"named_prepared_option_out": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', {'out': $out_val})",
"params": "$out=\"out_values\"",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', {'out': 'out_values'})",
"expected_result": "cannot_prepare"
},
# MB-33724
"positional_prepared_query_definition": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, $1)",
"params": "args=[\"state:California\"]",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California')",
"expected_result": "success"
},
# MB-33724
"positional_prepared_option_index_name": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', $1)",
"params": "args=[\"{'index': 'idx_beer_sample_fts'}\"]",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', {'index': 'idx_beer_sample_fts'})",
"expected_result": "cannot_execute"
},
"positional_prepared_option_settings": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\": \"state\", \"match\":\"California\"},'size': $1, \"sort\":[\"_id\"]})",
"params": "args=[15]",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\":{\"field\": \"state\", \"match\":\"California\"},'size': 15, 'sort':['_id']})",
"expected_result": "success"
},
"positional_prepared_option_out": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', {'out': $1})",
"params": "args=[\"out_values\"]",
"n1ql": "",
"expected_result": "cannot_prepare"
}
}
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
fts_name_index = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
self.run_cbq_query("delete from system:prepareds")
#1 create prepared
create_prepared = "prepare "+test_name+" from "+test_cases[test_name]['prepared']
if test_cases[test_name]["expected_result"] == "cannot_prepare":
try:
self.run_cbq_query(create_prepared)
except CBQError as err:
self.assertEqual(True, True)
return
else:
self.run_cbq_query(create_prepared)
#2 call prepared
call_query = "execute "+test_name
if test_cases[test_name]["params"]!="":
call_query = call_query + "&" + test_cases[test_name]["params"]
prepared_results = self.run_cbq_query_curl(query="'"+call_query+"'")['results']
#3 compare to n1ql query
n1ql_results = self.run_cbq_query(test_cases[test_name]['n1ql'])['results']
prepared_doc_ids = []
for result in prepared_results:
prepared_doc_ids.append(result['id'])
n1ql_doc_ids = []
for result in n1ql_results:
n1ql_doc_ids.append(result['id'])
        self.assertEqual(len(n1ql_doc_ids), len(prepared_doc_ids),
                         "Results count does not match for test " + test_name + ". N1QL - " + str(
                             len(n1ql_doc_ids)) + ", Prepareds - " + str(len(prepared_doc_ids)))
        self.assertEqual(sorted(prepared_doc_ids), sorted(n1ql_doc_ids),
                         "Found mismatch in results for test " + test_name + ".")
self._remove_all_fts_indexes()
def test_parameterized_queries(self):
#TODO - analyze execution plan for covering indexes.
test_name = self.input.param("test_name", '')
if test_name == '':
raise Exception("Invalid test configuration! Test name should not be empty.")
test_cases = {
# MB-33724
"named_prepared_query_definition": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, $state_val)",
"params": "$state_val=\"state:California\"",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California')",
"expected_result": "success"
},
# MB-33724
"named_prepared_option_settings": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, \"state:California\", {\"size\": $size, \"sort\":[\"_id\"]})",
"params": "$size=15",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', {'size': 15, 'sort':['_id']})",
"expected_result": "success"
},
"named_prepared_option_out": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, \"state:California\", $out_param)",
"params": "$out_param={\"out\":\"out_values\"}",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', {'out': 'out_values'})",
"expected_result": "success"
},
"positional_prepared_query_definition": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, $1)",
"params": "args=[\"state:California\"]",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California')",
"expected_result": "success"
},
"positional_prepared_option_index_name": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, \"state:California\", $1)",
"params": "args=[{\"index\":\"idx_beer_sample_fts\"}]",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', {\"index\":\"idx_beer_sample_fts\"})",
"expected_result": "success"
},
"positional_prepared_option_settings": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, \"state:California\", {\"size\": $1, \"sort\":[\"_id\"]})",
"params": "args=[15]",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California', {'size': 15, 'sort':['_id']})",
"expected_result": "success"
},
"positional_prepared_option_out": {
"prepared": "select meta().id from `beer-sample` where search(`beer-sample`, \"state:California\", $1)",
"params": "args=[{\"out\":\"out_values\"}]",
"n1ql": "select meta().id from `beer-sample` where search(`beer-sample`, 'state:California')",
"expected_result": "success"
}
}
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
fts_name_index = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
if test_cases[test_name]['expected_result'] == "success":
call_query = test_cases[test_name]['prepared']
if test_cases[test_name]["params"]!="":
call_query = call_query + "&" + test_cases[test_name]["params"]
prepared_results = self.run_cbq_query_curl(query="'"+call_query+"'")['results']
#3 compare to n1ql query
n1ql_results = self.run_cbq_query(test_cases[test_name]['n1ql'])['results']
prepared_doc_ids = []
for result in prepared_results:
prepared_doc_ids.append(result['id'])
n1ql_doc_ids = []
for result in n1ql_results:
n1ql_doc_ids.append(result['id'])
            self.assertEqual(len(n1ql_doc_ids), len(prepared_doc_ids),
                             "Results count does not match for test " + test_name + ". N1QL - " + str(
                                 len(n1ql_doc_ids)) + ", Prepareds - " + str(len(prepared_doc_ids)))
            self.assertEqual(sorted(prepared_doc_ids), sorted(n1ql_doc_ids),
                             "Found mismatch in results for test " + test_name + ".")
self._remove_all_fts_indexes()
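    # --- Illustrative helper sketch (not wired into the tests above) --------
    # test_prepareds and test_parameterized_queries both end by comparing the
    # document ids returned by the prepared/parameterized query against those
    # of the plain N1QL query. A compact, equivalent form of that comparison
    # is sketched here; the tests above keep their original inline version.
    @staticmethod
    def _demo_compare_doc_ids(left_results, right_results):
        left_ids = sorted(r['id'] for r in left_results)
        right_ids = sorted(r['id'] for r in right_results)
        # equal sorted id lists imply equal counts and identical contents
        return left_ids == right_ids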
def test_rbac(self):
user = self.input.param("user", '')
if user == '':
raise Exception("Invalid test configuration! User name should not be empty.")
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
fts_name_index = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
self._create_all_users()
username = self.users[user]['username']
password = self.users[user]['password']
query = "select meta().id from `beer-sample` where search(`beer-sample`, \"state:California\")"
master_result = self.run_cbq_query(query=query, server=self.master, username=username, password=password)
self.assertEqual(master_result['status'], 'success', username+" query run failed on non-fts node")
self._remove_all_fts_indexes()
# 10 results in fts
def test_sorting_pagination(self):
query = "select meta().id from `beer-sample` where search(`beer-sample`, \"state:California\")"
# inner sort modes: asc, desc
# inner sort fields: single field, multiple fields, score, id
# missing values: first, last
# mode: min, max, offset
inner_sorting_field_values = ["_id"]
inner_sorting_order_values = ["", "min", "max"]
inner_offset_values = ["", "10"]
outer_sorting_field_values = ["meta().id"]
outer_sorting_order_values = ["", "asc", "desc"]
outer_offset_values = ["", "10"]
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
fts_name_index = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
for inner_field in inner_sorting_field_values:
for inner_order in inner_sorting_order_values:
for inner_offset in inner_offset_values:
for outer_field in outer_sorting_field_values:
for outer_order in outer_sorting_order_values:
for outer_offset in outer_offset_values:
inner_sort_expression = ""
if inner_field != "":
inner_sort_expression = ", \"sort\": [{\"by\": \"field\", \"field\": \""+inner_field+"\""
if inner_order != "":
inner_sort_expression = inner_sort_expression + ", \"mode\": \""+inner_order+"\""
if inner_offset != "":
inner_sort_expression = inner_sort_expression + ", \"offset\": "+inner_offset+""
inner_sort_expression = inner_sort_expression + "}]"
outer_sort_expression = ""
if outer_field != "":
outer_sort_expression = "order by "+outer_field +" "+outer_order
if outer_offset != "":
outer_sort_expression = outer_sort_expression + " offset "+outer_offset
search_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\": {\"field\": \"state\", \"match\": \"California\"}"+inner_sort_expression+"}) "+outer_sort_expression
search_results = self.run_cbq_query(search_query)['results']
if outer_sort_expression == "":
if inner_sort_expression != "":
fts_request_str = "'{\"query\":{\"field\": \"state\", \"match\":\"California\"}, \"size\":1000,"+inner_sort_expression+"}'"
else:
fts_request_str = "'{\"query\":{\"field\": \"state\", \"match\":\"California\"}, \"size\":10000}'"
fts_request = json.loads(fts_request_str)
total_hits, hits, took, status = rest.run_fts_query(
index_name="idx_beer_sample_fts",
query_json=fts_request)
comparison_results = self._compare_n1ql_results_against_fts(search_results, hits)
self.assertEqual(comparison_results, "OK", comparison_results)
else:
n1ql_query = "select meta().id from `beer-sample` where search(`beer-sample`, {\"query\": {\"field\": \"state\", \"match\": \"California\"}}) "+outer_sort_expression
n1ql_results = self.run_cbq_query(n1ql_query)['results']
search_doc_ids = []
for result in search_results:
search_doc_ids.append(result['id'])
n1ql_doc_ids = []
for result in n1ql_results:
n1ql_doc_ids.append(result['id'])
self.assertEqual(len(n1ql_doc_ids), len(search_doc_ids),
"SEARCH QUERY - " + search_query + "\nN1QL QUERY - " + n1ql_query)
self.assertEqual(sorted(search_doc_ids), sorted(n1ql_doc_ids), "SEARCH QUERY - "+search_query+"\nN1QL QUERY - "+n1ql_query)
self._remove_all_fts_indexes()
def test_scan_consistency(self):
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
fts_name_index = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
scan_val = self.input.param("scan_type", '')
count_before_update = self.run_cbq_query("select count(*) from `beer-sample` where search(`beer-sample`, \"state:California\")")['results'][0]
self.scan_consistency = scan_val
update_query = "update `beer-sample` set state='Califffornia' where meta().id in ( select raw meta().id from `beer-sample` b where search(b, {\"query\": {\"field\": \"state\", \"match\": \"California\"}, \"sort\": [{\"by\": \"field\", \"field\": \"city\"}]}))"
select_query = "select meta().id from `beer-sample` where search(`beer-sample`, \"state:California\")"
threads = []
t = threading.Thread(target=self._update_parallel, args=(update_query, "UPDATE", count_before_update['$1'], scan_val))
t1 = threading.Thread(target=self._check_scan_parallel, args=(select_query, count_before_update['$1'], scan_val))
t.daemon = True
t1.daemon = True
threads.append(t)
threads.append(t1)
t.start()
t1.start()
for th in threads:
th.join()
threads.remove(th)
update_query = "update `beer-sample` set state='California' where state='Califffornia'"
self.run_cbq_query(update_query)
self._remove_all_fts_indexes()
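    # Illustrative only: the start-then-join pattern used by test_scan_consistency
    # above, reduced to plain threading calls on two arbitrary callables. The
    # helper name is hypothetical and is not referenced by any test in this file.
    @staticmethod
    def _run_in_parallel_sketch(first_op, second_op):
        workers = [threading.Thread(target=first_op), threading.Thread(target=second_op)]
        for worker in workers:
            worker.daemon = True
            worker.start()
        # Join every worker; do not remove items from the list while iterating over it.
        for worker in workers:
            worker.join()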
def test_drop_fts_index(self):
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
fts_name_index = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
select_query = "select * from `beer-sample` l join `beer-sample` r on l.city=r.city where search(l,\"city:San Francisco\")"
if not self.is_index_present("beer-sample", "beer_sample_city_idx"):
self.run_cbq_query("create index beer_sample_city_idx on `beer-sample` (`beer-sample`.city)")
threads = []
t = threading.Thread(target=self._select_parallel, args=(select_query, 213,))
t.daemon = True
threads.append(t)
t.start()
t1 = threading.Thread(target=self._delete_fts_index, args=("idx_beer_sample_fts",))
t1.daemon = True
t1.start()
threads.append(t1)
        for th in threads:
            th.join()
def test_joins(self):
tests = {
"inner_l":{
"query": "select * from `beer-sample` l join `beer-sample` r on l.city=r.city where search(l, \"city:San Francisco\")",
"expected_result": "positive"
},
"inner_r":{
"query": "select * from `beer-sample` l join `beer-sample` r on l.city=r.city where search(r, \"city:San Francisco\")",
"expected_result": "negative"
},
"left_l":{
"query": "select * from `beer-sample` l left join `beer-sample` r on l.city=r.city where search(l, \"city:San Francisco\")",
"expected_result": "positive"
},
"left_r":{
"query": "select * from `beer-sample` l left join `beer-sample` r on l.city=r.city where search(r, \"city:San Francisco\")",
"expected_result": "negative"
},
"left_outer_l":{
"query": "select * from `beer-sample` l left outer join `beer-sample` r on l.city=r.city where search(l, \"city:San Francisco\")",
"expected_result": "positive"
},
"left_outer_r":{
"query": "select * from `beer-sample` l left outer join `beer-sample` r on l.city=r.city where search(r, \"city:San Francisco\")",
"expected_result": "negative"
},
"right_l":{
"query": "select * from `beer-sample` l right join `beer-sample` r on l.city=r.city where search(l, \"city:San Francisco\")",
"expected_result": "negative"
},
"right_r":{
"query": "select * from `beer-sample` l right join `beer-sample` r on l.city=r.city where search(r, \"city:San Francisco\")",
"expected_result": "positive"
},
"right_outer_l":{
"query": "select * from `beer-sample` l right outer join `beer-sample` r on l.city=r.city where search(l, \"city:San Francisco\")",
"expected_result": "negative"
},
"right_outer_r":{
"query": "select * from `beer-sample` l right outer join `beer-sample` r on l.city=r.city where search(r, \"city:San Francisco\")",
"expected_result": "positive"
},
"use_hash_build_l":{
"query": "select * from `beer-sample` l join `beer-sample` r use hash(build) on l.city=r.city where search(l, \"city:San Francisco\")",
"expected_result": "positive"
},
"use_hash_build_r":{
"query": "select * from `beer-sample` l join `beer-sample` r use hash(build) on l.city=r.city where search(r, \"city:San Francisco\")",
"expected_result": "positive"
},
"use_hash_probe_l":{
"query": "select * from `beer-sample` l join `beer-sample` r use hash(probe) on l.city=r.city where search(l, \"city:San Francisco\")",
"expected_result": "positive"
},
"use_hash_probe_r":{
"query": "select * from `beer-sample` l join `beer-sample` r use hash(probe) on l.city=r.city where search(r, \"city:San Francisco\")",
"expected_result": "positive"
},
"use_nl_l":{
"query": "select * from `beer-sample` l join `beer-sample` r use nl on l.city=r.city where search(l, \"city:San Francisco\")",
"expected_result": "positive"
},
"use_nl_r":{
"query": "select * from `beer-sample` l join `beer-sample` r use nl on l.city=r.city where search(r, \"city:San Francisco\")",
"expected_result": "negative"
},
"use_hash_keys_build_l": {
"query": "select * from `beer-sample` l join `beer-sample` r use hash(build) keys [\"512_brewing_company\"] on l.city=r.city where search(l, \"city:Austin\")",
"expected_result": "positive"
},
"use_hash_keys_build_r": {
"query": "select * from `beer-sample` l join `beer-sample` r use hash(build) keys [\"512_brewing_company\"] on l.city=r.city where search(r, \"city:Austin\")",
"expected_result": "negative"
},
"use_hash_keys_probe_l": {
"query": "select * from `beer-sample` l join `beer-sample` r use hash(probe) keys [\"512_brewing_company\"] on l.city=r.city where search(l, \"city:Austin\")",
"expected_result": "positive"
},
"use_hash_keys_probe_r": {
"query": "select * from `beer-sample` l join `beer-sample` r use hash(probe) keys [\"512_brewing_company\"] on l.city=r.city where search(r, \"city:Austin\")",
"expected_result": "negative"
},
"lookup_l": {
"query": "select * from `beer-sample` l join `beer-sample` r on keys l.brewery_id where search(l, \"city:Austin\")",
"expected_result": "positive"
},
"lookup_r": {
"query": "select * from `beer-sample` l join `beer-sample` r on keys l.brewery_id where search(r, \"city:Austin\")",
"expected_result": "negative"
},
"index_l": {
"query": "select * from `beer-sample` l join `beer-sample` r on key r.brewery_id for l where search(l, \"city:Austin\")",
"expected_result": "positive"
},
"index_r": {
"query": "select * from `beer-sample` l join `beer-sample` r on key r.brewery_id for l where search(r, \"city:Austin\")",
"expected_result": "negative"
},
"in_l": {
"query": "select * from `beer-sample` l join `beer-sample` r on l.brewery_id in r.code where search(l, \"city:Austin\")",
"expected_result": "positive"
},
"in_r": {
"query": "select * from `beer-sample` l join `beer-sample` r on l.brewery_id in r.code where search(r, \"city:Austin\")",
"expected_result": "negative"
},
"any_satisfies_l": {
"query": "select * from `beer-sample` l join `beer-sample` r on l.address=r.address and any v in r.address satisfies (v='563 Second Street') end where search(l, \"city:Austin\")",
"expected_result": "positive"
},
"any_satisfies_r": {
"query": "select * from `beer-sample` l join `beer-sample` r on l.address=r.address and any v in r.address satisfies (v='563 Second Street') end where search(r, \"city:Austin\")",
"expected_result": "negative"
},
}
test_name = self.input.param("test_name", '')
if test_name == "":
raise Exception("Test name cannot be empty.")
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
fts_name_index = self._create_fts_index(index_name="idx_beer_sample_fts", doc_count=7303, source_name='beer-sample')
if not self.is_index_present("beer-sample", "beer_sample_city_idx"):
self.run_cbq_query("create index beer_sample_city_idx on `beer-sample` (`beer-sample`.city)")
if not self.is_index_present("beer-sample", "beer_sample_brewery_id_idx"):
self.run_cbq_query("create index beer_sample_brewery_id_idx on `beer-sample` (`beer-sample`.brewery_id)")
if not self.is_index_present("beer-sample", "beer_sample_address_arr_idx"):
self.run_cbq_query("create index beer_sample_address_arr_idx on `beer-sample` (all array v.address for v in address end)")
if not self.is_index_present("beer-sample", "beer_sample_address_idx"):
self.run_cbq_query("create index beer_sample_address_idx on `beer-sample` (`beer-sample`.address)")
self.wait_for_all_indexes_online()
n1ql_query = ""
if test_name == '':
raise Exception("Invalid test configuration! Test name should not be empty.")
try:
n1ql_query = tests[test_name]['query']
result = self.run_cbq_query(n1ql_query)
self.assertEqual(result['status'], 'success', "The following query is incorrect - "+n1ql_query)
explain_result = self.run_cbq_query("explain "+n1ql_query)
if tests[test_name]['expected_result'] == "positive":
self.assertTrue("idx_beer_sample_fts" in str(explain_result), "FTS index is not used for query: "+n1ql_query)
if tests[test_name]['expected_result'] == "negative":
self.assertTrue("idx_beer_sample_fts" not in str(explain_result),
"FTS index is used for query: " + n1ql_query)
except CBQError as err:
self.log.info("Incorrect query ::"+n1ql_query+"::")
finally:
self.drop_index_safe('beer-sample', 'beer_sample_city_idx')
self.drop_index_safe('beer-sample', 'beer_sample_brewery_id_idx')
self.drop_index_safe('beer-sample', 'beer_sample_address_arr_idx')
self.drop_index_safe('beer-sample', 'beer_sample_address_idx')
def test_expired_docs(self):
self.cbcluster = CouchbaseCluster(name='cluster', nodes=self.servers, log=self.log)
bucket_params = self._create_bucket_params(server=self.master, size=self.bucket_size,
replicas=self.num_replicas,
enable_replica_index=self.enable_replica_index,
eviction_policy=self.eviction_policy, bucket_priority=None,
lww=self.lww, maxttl=60,
compression_mode=self.compression_mode)
self.cluster.create_standard_bucket("ttl_bucket", 11222, bucket_params)
for i in range(0, 100, 1):
if i%100 == 0:
initial_statement = (" INSERT INTO {0} (KEY, VALUE) VALUES ('primary_key_"+str(i)+"',").format("ttl_bucket")
initial_statement += "{"
initial_statement += "'primary_key':'primary_key_"+str(i) + "','string_field': 'test_string " + str(i) + "','int_field':"+str(i)+"})"
else:
initial_statement = (" INSERT INTO {0} (KEY, VALUE) VALUES ('primary_key_"+str(i)+"',").format("ttl_bucket")
initial_statement += "{"
initial_statement += "'primary_key':'primary_key_"+str(i) + "','string_field': 'string data " + str(i) + "','int_field':"+str(i)+"})"
self.run_cbq_query(initial_statement)
fts_name_index = self._create_fts_index(index_name="idx_ttl_bucket_fts", doc_count=100, source_name='ttl_bucket')
results_before_expiration = self.run_cbq_query("select count(*) from ttl_bucket where search(ttl_bucket, \"string_field:string\")")
self.assertTrue(results_before_expiration['results'][0]['$1'] > 0, "Results before expiration must be positive")
self.sleep(300)
results_after_expiration = self.run_cbq_query("select count(*) from ttl_bucket where search(ttl_bucket, \"string_field:string\")")
self.assertTrue(results_after_expiration['results'][0]['$1'] == 0, "Results after expiration must be zero")
# ============================================ utils =================================
def _compare_n1ql_results_against_fts(self, n1ql_results, hits):
n1ql_doc_ids = []
for result in n1ql_results:
n1ql_doc_ids.append(result['id'])
fts_doc_ids = []
for hit in hits:
fts_doc_ids.append(hit['id'])
        if len(n1ql_doc_ids) != len(fts_doc_ids):
            return "Results count does not match: FTS - " + str(len(fts_doc_ids)) + ", N1QL - " + str(len(n1ql_doc_ids))
        if sorted(fts_doc_ids) != sorted(n1ql_doc_ids):
            return "Found mismatch in result document ids."
return "OK"
    def _check_scan_parallel(self, query, expected_count, scan_type):
        try:
            search_results = self.run_cbq_query(query)['metrics']['resultCount']
            self.assertTrue(expected_count - int(search_results) > 0,
                            "Query result is incorrect for " + scan_type + ": \n"
                            "Results before update - " + str(expected_count) +
                            ", count during update - " + str(search_results))
        except CBQError:
            self.fail('Wrong query - ' + str(query))
    def _update_parallel(self, query, operation, expected_count, scan_type):
        try:
            self.run_cbq_query(query)
        except CBQError:
            self.fail('Wrong query - ' + str(query))
    def _select_parallel(self, query, expected_count):
        try:
            search_results = self.run_cbq_query(query)['metrics']['resultCount']
            self.assertEqual(expected_count, search_results, "Query result is incorrect")
        except CBQError:
            self.fail('Wrong query - ' + str(query))
def _load_test_buckets(self):
if self.get_bucket_from_name("beer-sample") is None:
self.rest.load_sample("beer-sample")
self.wait_for_buckets_status({"beer-sample": "healthy"}, 5, 120)
self.wait_for_bucket_docs({"beer-sample": 7303}, 5, 120)
if not self.is_index_present("beer-sample", "beer_sample_code_idx"):
self.run_cbq_query("create index beer_sample_code_idx on `beer-sample` (`beer-sample`.code)")
if not self.is_index_present("beer-sample", "beer_sample_brewery_id_idx"):
self.run_cbq_query("create index beer_sample_brewery_id_idx on `beer-sample` (`beer-sample`.brewery_id)")
self.wait_for_all_indexes_online()
def _create_fts_index(self, index_name='', doc_count=0, source_name=''):
fts_index_type = self.input.param("fts_index_type", "scorch")
fts_index = self.cbcluster.create_fts_index(name=index_name, source_name=source_name)
if fts_index_type == 'upside_down':
fts_index.update_index_to_upside_down()
else:
fts_index.update_index_to_scorch()
indexed_doc_count = 0
while indexed_doc_count < doc_count:
try:
self.sleep(10)
indexed_doc_count = fts_index.get_indexed_doc_count()
except KeyError as k:
continue
return fts_index
def _delete_fts_index(self, index_name=''):
rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
rest.delete_fts_index(index_name)
def _open_curl_access(self):
shell = RemoteMachineShellConnection(self.master)
cmd = (self.curl_path + ' -u ' + self.master.rest_username + ':' + self.master.rest_password + ' http://' + self.master.ip + ':' + self.master.port + '/settings/querySettings/curlWhitelist -d \'{"all_access":true}\'')
shell.execute_command(cmd)
def _create_all_users(self):
admin_user = [{'id': 'admin_user', 'name': 'admin_user', 'password': 'password'}]
rolelist = [{'id': 'admin_user', 'name': 'admin_user', 'roles': 'admin'}]
RbacBase().create_user_source(admin_user, 'builtin', self.master)
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
self.users['admin_user'] = {'username': 'admin_user', 'password': 'password'}
all_buckets_data_reader_search_admin = [{'id': 'all_buckets_data_reader_search_admin', 'name': 'all_buckets_data_reader_search_admin', 'password': 'password'}]
rolelist = [{'id': 'all_buckets_data_reader_search_admin', 'name': 'all_buckets_data_reader_search_admin', 'roles': 'query_select[*],fts_admin[*],query_external_access'}]
RbacBase().create_user_source(all_buckets_data_reader_search_admin, 'builtin', self.master)
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
self.users['all_buckets_data_reader_search_admin'] = {'username': 'all_buckets_data_reader_search_admin', 'password': 'password'}
all_buckets_data_reader_search_reader = [{'id': 'all_buckets_data_reader_search_reader', 'name': 'all_buckets_data_reader_search_reader', 'password': 'password'}]
rolelist = [{'id': 'all_buckets_data_reader_search_reader', 'name': 'all_buckets_data_reader_search_reader', 'roles': 'query_select[*],fts_searcher[*],query_external_access'}]
RbacBase().create_user_source(all_buckets_data_reader_search_reader, 'builtin', self.master)
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
self.users['all_buckets_data_reader_search_reader'] = {'username': 'all_buckets_data_reader_search_reader', 'password': 'password'}
test_bucket_data_reader_search_admin = [{'id': 'test_bucket_data_reader_search_admin', 'name': 'test_bucket_data_reader_search_admin', 'password': 'password'}]
rolelist = [{'id': 'test_bucket_data_reader_search_admin', 'name': 'test_bucket_data_reader_search_admin', 'roles': 'query_select[beer-sample],fts_admin[beer-sample],query_external_access'}]
RbacBase().create_user_source(test_bucket_data_reader_search_admin, 'builtin', self.master)
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
self.users['test_bucket_data_reader_search_admin'] = {'username': 'test_bucket_data_reader_search_admin', 'password': 'password'}
test_bucket_data_reader_null = [{'id': 'test_bucket_data_reader_null', 'name': 'test_bucket_data_reader_null', 'password': 'password'}]
rolelist = [{'id': 'test_bucket_data_reader_null', 'name': 'test_bucket_data_reader_null', 'roles': 'query_select[beer-sample],query_external_access'}]
RbacBase().create_user_source(test_bucket_data_reader_null, 'builtin', self.master)
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
self.users['test_bucket_data_reader_null'] = {'username': 'test_bucket_data_reader_null', 'password': 'password'}
test_bucket_data_reader_search_reader = [{'id': 'test_bucket_data_reader_search_reader', 'name': 'test_bucket_data_reader_search_reader', 'password': 'password'}]
rolelist = [{'id': 'test_bucket_data_reader_search_reader', 'name': 'test_bucket_data_reader_search_reader', 'roles': 'query_select[beer-sample],fts_searcher[beer-sample],query_external_access'}]
RbacBase().create_user_source(test_bucket_data_reader_search_reader, 'builtin', self.master)
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
self.users['test_bucket_data_reader_search_reader'] = {'username': 'test_bucket_data_reader_search_reader', 'password': 'password'}
all_buckets_data_reader_null = [{'id': 'all_buckets_data_reader_null', 'name': 'all_buckets_data_reader_null', 'password': 'password'}]
rolelist = [{'id': 'all_buckets_data_reader_null', 'name': 'all_buckets_data_reader_null', 'roles': 'query_select[*],query_external_access'}]
RbacBase().create_user_source(all_buckets_data_reader_null, 'builtin', self.master)
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
self.users['all_buckets_data_reader_null'] = {'username': 'all_buckets_data_reader_null', 'password': 'password'}
def _remove_all_fts_indexes(self):
indexes = self.cbcluster.get_indexes()
rest = self.get_rest_client(self.servers[0].rest_username, self.servers[0].rest_password)
for index in indexes:
rest.delete_fts_index(index.name)
def get_rest_client(self, user, password):
rest = RestConnection(self.cbcluster.get_random_fts_node())
rest.username = user
rest.password = password
return rest
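# Illustrative only: a framework-free sketch of the id-comparison idea used by
# _compare_n1ql_results_against_fts above. The result shapes (N1QL rows carrying
# an 'id' field, FTS hits carrying an 'id' field) match the queries in this
# file; the function name itself is hypothetical and not used by the test class.
def compare_doc_id_sets(n1ql_results, fts_hits):
    # Collect document ids from both result sets and compare them as unordered sets.
    n1ql_ids = sorted(row['id'] for row in n1ql_results)
    fts_ids = sorted(hit['id'] for hit in fts_hits)
    if len(n1ql_ids) != len(fts_ids):
        return "Results count does not match: FTS - %d, N1QL - %d" % (len(fts_ids), len(n1ql_ids))
    if n1ql_ids != fts_ids:
        return "Found mismatch in result document ids."
    return "OK"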
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
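# Illustrative only: how the certificate and key constants above are typically
# consumed. This helper is not part of the original test module; it just shows
# the ssl.SSLContext calls the tests below exercise (load_cert_chain for the
# combined key+cert file, load_verify_locations for the CA directory).
def _example_server_context():
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.load_cert_chain(CERTFILE)             # keycert.pem holds both key and certificate
    context.load_verify_locations(capath=CAPATH)  # directory of CA certificates
    return context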
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_SSLv23
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_SSLv23')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if "LibreSSL" in s:
self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in sequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with ssl.wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatement for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
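# Illustrative only: ssl.match_hostname() as exercised by test_match_hostname
# above, outside the unittest harness. The cert dict mimics the minimal shape
# returned by SSLSocket.getpeercert(); the values are made up.
def _example_match_hostname():
    cert = {'subject': ((('commonName', 'example.com'),),)}
    ssl.match_hostname(cert, 'example.com')          # matches, returns None
    try:
        ssl.match_hostname(cert, 'www.example.com')  # no wildcard in the cert, so this fails
    except ssl.CertificateError:
        pass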
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1,
ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
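# With binary_form=True, get_ca_certs() returns the same CA certificates as
# DER-encoded bytes instead of dicts.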
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
@unittest.skipUnless(_have_threads, "Needs threading module")
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
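# Enter the server's context manager by hand and register __exit__ as cleanup,
# so the server thread is stopped and joined after every test.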
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# this should succeed because we specify the root cert
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop. Call func(*args) and, depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', 10)
count = 0
while True:
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
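# The wrap_bio tests below use ssl_io_loop() to pump the handshake, the reads
# and writes, and the final unwrap through the MemoryBIO pair.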
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(SIGNING_CA)
ctx.check_hostname = True
sslobj = ctx.wrap_bio(incoming, outgoing, False, 'localhost')
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNone(sslobj.shared_ciphers())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail: CERTFILE is not the CA that signed the remote certificate
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
if _have_threads:
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
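# The handler echoes data back lower-cased and understands a few commands:
# 'over' ends the connection, 'STARTTLS'/'ENDTLS' switch TLS on and off, and
# 'CB tls-unique' returns the server's channel-binding data.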
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" % addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = " %s->%s %s\n" if expect_success else " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags needs OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
socket.socket() as sock, \
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata!r}>> ({nout:d}) received; "
"expected <<{indata!r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp!s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata!r}>> ({nout:d}) received; "
"expected <<{indata!r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp!s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = ssl.wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the transport buffers will fill
# up and, because the socket is non-blocking, send() will raise
# SSLWantWriteError (or SSLWantReadError) instead of blocking.
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Switch back to blocking mode before closing the socket.
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# The client waits until the server is listening, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), "TLSv1")
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers("RC4")
server_context.set_ciphers("AES:RC4")
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
self.assertIn("RC4", name.split("-"))
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SimpleBackgroundTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
benchmark.py
|
import threading
import time
from datetime import datetime
import rpcgrid as rpcg
from rpcgrid.providers.socket import SocketProvider
def create_server(p=None):
@rpcg.register
def sum(x, y):
return x + y
@rpcg.register
def sleep(x):
print('start sleep:', x, datetime.now())
time.sleep(x)
print('stop', x, datetime.now())
return x
return rpcg.create(p)
def benchmark(rpcserver, rpcclient):
print('Benchmark start server')
threading.Thread(target=rpcserver.run, daemon=False).start()
time.sleep(0.1)
print('Call sum:', rpcclient.sum(5, 6).wait())
start = datetime.now()
n = 5000
for i in range(n):
c = rpcclient.sum(i, i).wait()
if c != 2 * i:
print('Error:', c, ' true:', 2 * i)
t = datetime.now() - start
print(
        'profile time:', n, t, round(n / (t.total_seconds() * 1000), 2), 'it/ms'
)
print('Simple batch operation:')
tsk = []
start = datetime.now()
for i in range(n):
tsk.append(rpcclient.sum(i, i))
t = datetime.now() - start
print(
'task created time:',
n,
t,
        round(n / (t.total_seconds() * 1000), 2),
'it/ms',
)
    for i in range(n):
        result = tsk[i].wait()
        if result != 2 * i:
            print('Error:', result, ' true:', 2 * i)
t = datetime.now() - start
print(
        'profile time:', n, t, round(n / (t.total_seconds() * 1000), 2), 'it/ms'
)
rpcclient.close()
rpcserver.close()
if __name__ == '__main__':
# Create RPC server
socket_rpcserver = create_server(SocketProvider())
# Open server provider indirect
socket_rpcclient = rpcg.open(SocketProvider('localhost:6300'))
print('SOCKET TEST')
benchmark(socket_rpcserver, socket_rpcclient)
rpcserver = create_server()
rpcclient = rpcg.open()
# Cross connection for localprovider
rpcserver.provider.set_remote_provider(rpcclient)
rpcclient.provider.set_remote_provider(rpcserver)
print('LOCAL TEST')
benchmark(rpcserver, rpcclient)
print('Done')
time.sleep(1)
|
remove_duplicate_ips.py
|
import socket, sys
from queue import Queue
from threading import Thread
q = Queue(maxsize=0)
file = open(sys.argv[1], 'r')
urls = [x.strip() for x in file.readlines()]
results = {}
for i, url in enumerate(urls):
q.put(url)
def get_host(q, result):
while not q.empty():
work = q.get()
try:
result[work] = socket.gethostbyname(work)
except Exception as e:
#if 'idna' not in e:
# print(work)
print(work)
q.task_done()
return True
num_threads = min(10, len(urls))
for i in range(num_threads):
worker = Thread(target=get_host, args=(q,results), daemon=True)
worker.start()
q.join()
seen = []
for key, value in results.items():
if value not in seen:
print(key)
seen.append(value)
|
cef_application.py
|
import pywintypes
import ctypes
import logging
import os
import threading
import tkinter as tk
import win32api
import win32con
import win32gui
import win32gui_struct
from cefpython3 import cefpython as cef
log = logging.getLogger()
# log.setLevel(logging.ERROR)
class SysTrayIcon (object):
    '''SysTrayIcon displays an icon in the system tray (taskbar notification area)'''
QUIT = 'QUIT'
SPECIAL_ACTIONS = [QUIT]
FIRST_ID = 5320
def __init__(s, icon, hover_text, menu_options, on_quit, tk_window=None, default_menu_index=None, window_class_name=None):
        '''
        icon                path to the icon file to display
        hover_text          text shown when the mouse hovers over the tray icon
        menu_options        right-click menu, format: (('a', None, callback), ('b', None, (('b1', None, callback),)))
        on_quit             quit callback, also run when the application exits
        tk_window           the Tk window (s.root), used to show the window when the icon is clicked
        default_menu_index  index of the right-click menu entry that is not displayed
        window_class_name   window class name
        '''
s.icon = icon
s.hover_text = hover_text
s.on_quit = on_quit
s.root = tk_window
menu_options = menu_options + (('退出', None, s.QUIT),)
s._next_action_id = s.FIRST_ID
s.menu_actions_by_id = set()
s.menu_options = s._add_ids_to_menu_options(list(menu_options))
s.menu_actions_by_id = dict(s.menu_actions_by_id)
del s._next_action_id
s.default_menu_index = (default_menu_index or 0)
s.window_class_name = window_class_name or "SysTrayIconPy"
message_map = {win32gui.RegisterWindowMessage("TaskbarCreated"): s.restart,
win32con.WM_DESTROY: s.destroy,
win32con.WM_COMMAND: s.command,
win32con.WM_USER+20: s.notify, }
        # Register the window class.
wc = win32gui.WNDCLASS()
wc.hInstance = win32gui.GetModuleHandle(None)
wc.lpszClassName = s.window_class_name
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
wc.hbrBackground = win32con.COLOR_WINDOW
        wc.lpfnWndProc = message_map # a wndproc function could also be specified here.
s.classAtom = win32gui.RegisterClass(wc)
def activation(s):
        '''Activate the tray icon, so a new one does not have to be re-created every time'''
        hinst = win32gui.GetModuleHandle(None) # Create the window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
s.hwnd = win32gui.CreateWindow(s.classAtom,
s.window_class_name,
style,
0, 0,
win32con.CW_USEDEFAULT,
win32con.CW_USEDEFAULT,
0, 0, hinst, None)
win32gui.UpdateWindow(s.hwnd)
s.notify_id = None
# s.refresh(title='软件已后台!', msg='点击重新打开', time=500)
s.refresh(title='软件已后台!', msg='', time=500)
win32gui.PumpMessages()
def refresh(s, title='', msg='111', time=500):
        '''Refresh the tray icon
        title  notification title
        msg    notification body; if empty, no balloon tip is shown
        time   how long the balloon tip is displayed'''
hinst = win32gui.GetModuleHandle(None)
if os.path.isfile(s.icon):
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
hicon = win32gui.LoadImage(hinst, s.icon, win32con.IMAGE_ICON,
0, 0, icon_flags)
        else: # icon file not found - fall back to the default
hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
if s.notify_id:
message = win32gui.NIM_MODIFY
else:
message = win32gui.NIM_ADD
        s.notify_id = (s.hwnd, 0, # window handle, tray icon ID
                       # flags describing which tray-icon features are used
                       win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP | win32gui.NIF_INFO,
                       win32con.WM_USER + 20, hicon, s.hover_text, # callback message ID, icon handle, tooltip text
                       msg, time, title, # balloon tip body, display time, title
                       win32gui.NIIF_INFO # icon used for the balloon tip
                       )
win32gui.Shell_NotifyIcon(message, s.notify_id)
def show_menu(s):
        '''Show the right-click (context) menu'''
menu = win32gui.CreatePopupMenu()
s.create_menu(menu, s.menu_options)
pos = win32gui.GetCursorPos()
win32gui.SetForegroundWindow(s.hwnd)
win32gui.TrackPopupMenu(menu,
win32con.TPM_LEFTALIGN,
pos[0],
pos[1],
0,
s.hwnd,
None)
win32gui.PostMessage(s.hwnd, win32con.WM_NULL, 0, 0)
def _add_ids_to_menu_options(s, menu_options):
result = []
for menu_option in menu_options:
option_text, option_icon, option_action = menu_option
if callable(option_action) or option_action in s.SPECIAL_ACTIONS:
s.menu_actions_by_id.add((s._next_action_id, option_action))
result.append(menu_option + (s._next_action_id,))
else:
result.append((option_text,
option_icon,
s._add_ids_to_menu_options(option_action),
s._next_action_id))
s._next_action_id += 1
return result
def restart(s, hwnd, msg, wparam, lparam):
s.refresh()
def destroy(s, hwnd=None, msg=None, wparam=None, lparam=None, exit=1):
nid = (s.hwnd, 0)
if exit and s.on_quit:
win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)
            win32gui.PostQuitMessage(0) # Terminate the application.
            s.on_quit(s) # use s.on_quit(s) when the callback needs the icon instance itself
        else:
            s.root.deiconify() # show the tk window again
def notify(s, hwnd, msg, wparam, lparam):
        '''Mouse events on the tray icon'''
        if lparam == win32con.WM_LBUTTONDBLCLK: # left button double-click
            pass
        elif lparam == win32con.WM_RBUTTONUP: # right button released
            s.show_menu()
        elif lparam == win32con.WM_LBUTTONUP: # left button released
            s.destroy(exit=0)
return True
"""
可能的鼠标事件:
WM_MOUSEMOVE #光标经过图标
WM_LBUTTONDOWN #左键按下
WM_LBUTTONUP #左键弹起
WM_LBUTTONDBLCLK #双击左键
WM_RBUTTONDOWN #右键按下
WM_RBUTTONUP #右键弹起
WM_RBUTTONDBLCLK #双击右键
WM_MBUTTONDOWN #滚轮按下
WM_MBUTTONUP #滚轮弹起
WM_MBUTTONDBLCLK #双击滚轮
"""
def create_menu(s, menu, menu_options):
for option_text, option_icon, option_action, option_id in menu_options[::-1]:
if option_icon:
option_icon = s.prep_menu_icon(option_icon)
if option_id in s.menu_actions_by_id:
item, extras = win32gui_struct.PackMENUITEMINFO(text=option_text,
hbmpItem=option_icon,
wID=option_id)
win32gui.InsertMenuItem(menu, 0, 1, item)
else:
submenu = win32gui.CreatePopupMenu()
s.create_menu(submenu, option_action)
item, extras = win32gui_struct.PackMENUITEMINFO(text=option_text,
hbmpItem=option_icon,
hSubMenu=submenu)
win32gui.InsertMenuItem(menu, 0, 1, item)
def prep_menu_icon(s, icon):
        # Load the icon.
ico_x = win32api.GetSystemMetrics(win32con.SM_CXSMICON)
ico_y = win32api.GetSystemMetrics(win32con.SM_CYSMICON)
hicon = win32gui.LoadImage(
0, icon, win32con.IMAGE_ICON, ico_x, ico_y, win32con.LR_LOADFROMFILE)
hdcBitmap = win32gui.CreateCompatibleDC(0)
hdcScreen = win32gui.GetDC(0)
hbm = win32gui.CreateCompatibleBitmap(hdcScreen, ico_x, ico_y)
hbmOld = win32gui.SelectObject(hdcBitmap, hbm)
brush = win32gui.GetSysColorBrush(win32con.COLOR_MENU)
win32gui.FillRect(hdcBitmap, (0, 0, 16, 16), brush)
win32gui.DrawIconEx(hdcBitmap, 0, 0, hicon, ico_x,
ico_y, 0, 0, win32con.DI_NORMAL)
win32gui.SelectObject(hdcBitmap, hbmOld)
win32gui.DeleteDC(hdcBitmap)
return hbm
def command(s, hwnd, msg, wparam, lparam):
id = win32gui.LOWORD(wparam)
s.execute_menu_option(id)
def execute_menu_option(s, id):
print(id)
menu_action = s.menu_actions_by_id[id]
if menu_action == s.QUIT:
win32gui.DestroyWindow(s.hwnd)
else:
menu_action(s)
class Application(tk.Frame):
navigation_bar = None
icon = os.path.join(os.path.abspath('.'), 'static/image/favicon.ico')
def __init__(self, url='http://127.0.0.1:5000/'):
self.root = tk.Tk()
self.root.iconbitmap(self.icon)
self.root.geometry("1366x640")
flask_thread = threading.Thread(
target=self.create_SysTrayIcon, args=())
flask_thread.setDaemon(True)
flask_thread.start()
self.root.protocol('WM_DELETE_WINDOW', self.root.withdraw)
tk.Frame.__init__(self, self.root)
self.master.title("Auto Stock")
self.bind("<Configure>", self.on_configure)
self.browser_frame = BrowserFrame(
            self, self.navigation_bar, url=url) # browser frame
self.browser_frame.grid(
row=1, column=0, sticky=(tk.N + tk.S + tk.E + tk.W))
tk.Grid.rowconfigure(self, 1, weight=1)
tk.Grid.columnconfigure(self, 0, weight=1)
        self.pack(fill=tk.BOTH, expand=tk.YES) # pack the Application frame
def switch_icon(s, _sysTrayIcon, icon='favicon.ico'):
        # Clicking a right-click menu item passes the SysTrayIcon instance to the callback, so _sysTrayIcon is s.SysTrayIcon here
        # This is just an example of changing the icon; remove this method if it is not needed
_sysTrayIcon.icon = icon
_sysTrayIcon.refresh()
        # Example of showing a balloon notification
s.show_msg(title='图标更换', msg='图标更换成功!', time=500)
def show_msg(s, title='标题', msg='内容', time=500):
s.SysTrayIcon.refresh(title=title, msg=msg, time=time)
pass
def create_SysTrayIcon(s, hover_text="Auto Stock"):
        '''Hide the window to the tray area; the key method that wires up SysTrayIcon'''
        # Tray icon right-click menu, format: ('name', None, callback); the tuple below also shows a submenu example
        # A '退出' (Quit) entry is appended automatically in SysTrayIcon.__init__; remove it there if not wanted
menu_options = (('一级 菜单', None, s.switch_icon),
('二级 菜单', None, (('更改 图标', None, s.switch_icon), )))
menu_options = ()
icon = os.path.join(os.path.abspath('.'), s.icon)
s.SysTrayIcon = SysTrayIcon(
            icon, # icon file
            hover_text, # text shown on hover
            menu_options, # right-click menu
            on_quit=s.exit, # quit callback
            tk_window=s.root, # Tk window
)
s.SysTrayIcon.activation()
def exit(s, _sysTrayIcon=None):
print('quit...')
s.root.quit()
def startup(self):
cef.Initialize()
self.root.mainloop()
cef.Shutdown()
print('Application ended')
os._exit(0)
def on_configure(self, event):
if self.browser_frame:
width = event.width
height = event.height
if self.navigation_bar:
height = height - self.navigation_bar.winfo_height()
self.browser_frame.on_Application_configure(width, height)
class BrowserFrame(tk.Frame):
closing = False
browser = None
def __init__(self, master, navigation_bar=None, url='http://127.0.0.1:5000/'):
self.navigation_bar = navigation_bar
tk.Frame.__init__(self, master)
self.bind("<Configure>", self.on_configure)
self.url = url
self.master = master
def PyAlert(s, msg):
print('calling PyAlert()')
s.master.SysTrayIcon.refresh(title='Algo Trading', msg=msg, time=500)
# hwnd = pywintypes.HANDLE(int(s.master.root.frame(), 16))
# # win32gui.MessageBox(hwnd, msg,
# # "PyAlert()", win32con.MB_ICONQUESTION)
# message = win32gui.NIM_ADD
# ico_x = win32api.GetSystemMetrics(win32con.SM_CXSMICON)
# ico_y = win32api.GetSystemMetrics(win32con.SM_CYSMICON)
# hicon = win32gui.LoadImage(
# 0, os.path.join(os.path.abspath('.'), 'favicon.ico'),
# win32con.IMAGE_ICON,
# ico_x,
# ico_y,
# win32con.LR_LOADFROMFILE)
        # s.notify_id = (hwnd, 0, # window handle, tray icon ID
        #                # flags describing which tray-icon features are used
        #                win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP | win32gui.NIF_INFO,
        #                win32con.WM_USER + 20, hicon, 'Auto Stock', # callback message ID, icon handle, tooltip text
        #                msg, 500, 'Algo Trading', # balloon tip body, display time, title
        #                win32gui.NIIF_INFO # icon used for the balloon tip
# )
# win32gui.Shell_NotifyIcon(message, s.notify_id)
def embed_browser(self):
window_info = cef.WindowInfo()
rect = [0, 0, self.winfo_width(), self.winfo_height()]
window_info.SetAsChild(self.get_window_handle(), rect)
self.browser = cef.CreateBrowserSync(window_info, url=self.url)
bindings = cef.JavascriptBindings(
bindToFrames=True, bindToPopups=True)
bindings.SetFunction("alert", self.PyAlert)
self.browser.SetJavascriptBindings(bindings)
self.message_loop_work()
    def get_window_handle(self): # get the window handle
        if self.winfo_id() > 0:
            return self.winfo_id()
    def message_loop_work(self): # pump the CEF message loop
        cef.MessageLoopWork()
        self.after(10, self.message_loop_work)
    def on_configure(self, _): # only embed the cef browser once it exists
        if not self.browser:
            self.embed_browser()
    def on_Application_configure(self, width, height): # resize the cef window
if self.browser:
ctypes.windll.user32.SetWindowPos(self.browser.GetWindowHandle(),
0, 0, 0, width, height, 0x0002)
if __name__ == '__main__':
Application(url='chrome://version/').startup()
|
server.py
|
import random
import os
import socket
from threading import Thread
colors_cells = [
(80, 252, 54), (36, 244, 255), (243, 31, 46), (4, 39, 243), (254, 6, 178), (255, 211, 7), (216, 6, 254),
(145, 255, 7), (7, 255, 182), (255, 6, 86), (147, 7, 255)
]
dots = {j: {'x': random.randint(20,1980), 'y': random.randint(20, 1980),
'color': colors_cells[random.randint(0, len(colors_cells)-1)]} for j in range(2000)}
all_users = {}
def upd_eaten(dots, all_users):
eaten_dots_ids = []
for i, dot in dots.items():
for name, user in all_users.items():
if ((dot['x'] - user['x'])**2 + (dot['y'] - user['y'])**2)**0.5 <= user['mass']/2:
all_users[name]['mass'] += 0.5
eaten_dots_ids.append(i)
for dot in eaten_dots_ids:
del dots[dot]
return eaten_dots_ids
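# Hedged worked example added for illustration (made-up coordinates, not part of
# the original server): a user of mass 20 has an eating radius of mass/2 = 10,
# so a dot 5 units away is consumed and the user's mass grows by 0.5.
def _upd_eaten_example():
    demo_dots = {0: {'x': 105, 'y': 100, 'color': (255, 0, 0)}}
    demo_users = {'p1': {'x': 100, 'y': 100, 'mass': 20}}
    eaten = upd_eaten(demo_dots, demo_users)   # returns [0]; demo_dots is now empty
    return eaten, demo_users['p1']['mass']     # ([0], 20.5)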
def on_new_client(clientsocket, addr):
while True:
msg = clientsocket.recv(1024)
#print(addr, ' >> ', msg)
if msg == b'close':
break
if msg == b'spawn':
msg = bytes(str(dots), encoding='UTF-8')
clientsocket.sendall(msg)
print('sent dots')
else:
user = eval(msg.decode('UTF-8'))
all_users[user['name']] = user
eaten_dots_ids = upd_eaten(dots, all_users)
resp = {'user': all_users[user['name']], 'eaten_dots_ids': eaten_dots_ids}
msg = bytes(str(resp), encoding='UTF-8')
clientsocket.send(msg)
clientsocket.close()
host = 'localhost'
port = 34325
SERVER_ADDRESS = (host, port)
os.system('lsof -ti:' + str(port))
s = socket.socket()
s.bind(SERVER_ADDRESS)
s.listen(10)
print('Server started!')
print('Waiting for clients...')
try:
while True:
c, addr = s.accept()
print ('Got connection from', addr)
thread = Thread(target=on_new_client, args=(c, addr))
thread.start()
except KeyboardInterrupt:
s.close()
print('server closed')
|
utils.py
|
import re
import os
import sys
import time
import threading
import ssl
import json
try:
import urllib.request as urllib_request # for Python 3
except ImportError:
import urllib2 as urllib_request # for Python 2 and Jython
try:
from urllib.parse import urlparse # for Python 3
except ImportError:
from urlparse import urlparse # for Python 2 and Jython
try:
from BaseHTTPServer import BaseHTTPRequestHandler
except ImportError:
from http.server import BaseHTTPRequestHandler
from io import BytesIO
def string_join(*ss):
"""
    Join an arbitrary number of string parameters
:param ss: strings to be joined
:return: strings joined
"""
return "".join(ss)
def mkdir_p(path):
"""
    Create the directory if it does not exist; ignore it if it already does, re-raise any other error
:param path:
:return:
"""
try:
os.makedirs(path)
except:
if os.path.isdir(path):
pass
else:
raise
def wrap_open(method, exceptions = (OSError, IOError)):
"""Wrap Open method in order to create containing directories if they does not exist"""
def fn(*args, **kwargs):
try:
mkdir_p(os.path.dirname(args[0]))
return method(*args, **kwargs)
except exceptions:
sys.exit('Can\'t open \'{0}\'. Error #{1[0]}: {1[1]}'.format(args[0], sys.exc_info()[1].args))
return fn
open = wrap_open(open)
def inherits_popup_menu(element):
"""
    Inherits the popup menu on each and every child widget.
:param element: current widget.
:return: None
"""
element.setInheritsPopupMenu(True)
try:
for e in element.getComponents():
inherits_popup_menu(e)
except:
pass
class AttrDict(dict):
"""
HACK: this class will generate a class object with fields from a dict
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def override_headers(http_header, overrideheaders):
"""
Overrides headers with the defined overrides.
:param http_header: an HTTP header content
    :param overrideheaders: a list of (header, value) tuples to apply.
:return: a new overridden headers string
"""
    ree = [(
        re.compile(r"^%s\s*:\s*[^\n]+$" % re.escape(header), re.MULTILINE),
        "%s: %s" % (header, val))
        for (header, val) in overrideheaders]
h = http_header
for find, replace in ree:
hn = re.sub(find, replace, h)
if hn == h:
h = "%s\n%s" % (hn, str(replace))
else:
h = hn
return h
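# Hedged usage sketch added for illustration (not part of the original module);
# the header names and values below are made-up examples. override_headers()
# rewrites a header that is already present and appends one that is missing.
def _override_headers_example():
    raw = "GET / HTTP/1.1\nHost: example.com\nAccept: */*"
    # "Host" already exists, so its value is rewritten; "X-Debug" is appended.
    return override_headers(raw, [("Host", "localhost"), ("X-Debug", "1")])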
def nop_evt(evt):
"""
Do nothing on events
:param evt: ignored
:return: None
"""
pass
def nop():
"""
Do nothing
:return: None
"""
pass
stop_watch = False
def stop():
global stop_watch
stop_watch = True
def watch(execute=nop, interval=60):
global stop_watch
def async_run():
try:
while not stop_watch:
execute()
time.sleep(interval)
sys.stdout.flush()
sys.stderr.flush()
finally:
sys.stdout.flush()
sys.stderr.flush()
t = threading.Thread(target=async_run)
t.start()
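# Hedged usage sketch added for illustration (not part of the original module):
# watch() runs a callable on a background thread every `interval` seconds until
# the module-level stop_watch flag is set via stop(); ping() is a made-up callable.
def _watch_example():
    def ping():
        print('still alive')
    watch(execute=ping, interval=5)   # start the background polling loop
    time.sleep(12)                    # let it fire a couple of times
    stop()                            # flip stop_watch so the loop exits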
def run_async(execute=nop):
def async_run():
try:
execute()
finally:
sys.stdout.flush()
sys.stderr.flush()
threading.Thread(target=async_run).start()
def run_timeout(execute, timeout):
def async_run():
try:
execute()
finally:
sys.stdout.flush()
sys.stderr.flush()
t = threading.Thread(target=async_run)
t.daemon = True
t.start()
t.join(timeout=timeout)
def make_http_handler(http_mutator=None):
class GraphQLRequestHandler(BaseHTTPRequestHandler):
def graphiql_page(self, address, extrascript=""):
"""
Return a graphiql console page given a domain
:param listenin_on: address on which the graphiql server proxy is listening on
:param domain: input domain on which to perform queries
:return: a string representing the graphiql page
"""
return """<html>
<head>
<title>InQL GraphiQL Console</title>
<link href="https://unpkg.com/graphiql/graphiql.min.css" rel="stylesheet" />
</head>
<body style="margin: 0;">
<div id="graphiql" style="height: 100vh;"></div>
<script
crossorigin
src="https://unpkg.com/react/umd/react.production.min.js"
></script>
<script
crossorigin
src="https://unpkg.com/react-dom/umd/react-dom.production.min.js"
></script>
<script
crossorigin
src="https://unpkg.com/graphiql/graphiql.min.js"
></script>
<script>
/**
* This GraphiQL example illustrates how to use some of GraphiQL's props
* in order to enable reading and updating the URL parameters, making
* link sharing of queries a little bit easier.
*
* This is only one example of this kind of feature, GraphiQL exposes
* various React params to enable interesting integrations.
*/
// Parse the search string to get url parameters.
var address = "%s";
var search = window.location.search;
var parameters = {};
search.substr(1).split('&').forEach(function (entry) {
var eq = entry.indexOf('=');
if (eq >= 0) {
parameters[decodeURIComponent(entry.slice(0, eq))] =
decodeURIComponent(entry.slice(eq + 1));
}
});
// if variables was provided, try to format it.
if (parameters.variables) {
try {
parameters.variables =
JSON.stringify(JSON.parse(parameters.variables), null, 2);
} catch (e) {
// Do nothing, we want to display the invalid JSON as a string, rather
// than present an error.
}
}
// When the query and variables string is edited, update the URL bar so
// that it can be easily shared
function onEditQuery(newQuery) {
parameters.query = newQuery;
updateURL();
}
function onEditVariables(newVariables) {
parameters.variables = newVariables;
updateURL();
}
function onEditOperationName(newOperationName) {
parameters.operationName = newOperationName;
updateURL();
}
function updateURL() {
var newSearch = '?' + Object.keys(parameters).filter(function (key) {
return Boolean(parameters[key]);
}).map(function (key) {
return encodeURIComponent(key) + '=' +
encodeURIComponent(parameters[key]);
}).join('&');
history.replaceState(null, null, newSearch);
}
const graphQLFetcher = graphQLParams =>
fetch(address, {
method: 'post',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(graphQLParams),
})
.then(response => response.json())
.catch(() => response.text());
ReactDOM.render(
React.createElement(GraphiQL, {
fetcher: graphQLFetcher,
query: parameters.query,
variables: parameters.variables,
operationName: parameters.operationName,
onEditQuery: onEditQuery,
onEditVariables: onEditVariables,
onEditOperationName: onEditOperationName
}),
document.getElementById('graphiql'),
);
while (document.querySelector('.title') == null) {
// wait for the title to be something
}
document.querySelector('.title').innerHTML = '<a href="https://github.com/doyensec/inql"><img src="https://github.com/doyensec/inql/blob/master/docs/inql.png?raw=true" style="display: block; height:6em; z-index: 10; position: relative"></img></a>';
%s
</script>
</body>
</html>""" % (address, extrascript)
# Handler for the GET requests
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
# Send the html message
if http_mutator:
page = self.graphiql_page(self.path, extrascript="""(function() {
var toolbar = document.querySelector('.toolbar');
var sendToRepeater = document.createElement('button');
sendToRepeater.classList.add('toolbar-button');
sendToRepeater.innerHTML = 'Send To Repeater';
sendToRepeater.title = 'Send To Repeater';
sendToRepeater.setAttribute('aria-invalid', true);
sendToRepeater.onclick = function() {
var xhr = new XMLHttpRequest();
xhr.open("PUT", address, true);
xhr.setRequestHeader('Content-Type', 'application/json');
var params = JSON.parse(JSON.stringify(parameters));
try {
params['variables'] = JSON.parse(params['variables']);
} catch (e) {
console.log('Cannot parse parameters');
}
xhr.send(JSON.stringify(params));
}
toolbar.appendChild(sendToRepeater);
} ());""")
else:
page = self.graphiql_page(self.path)
self.wfile.write(page.encode())
return
def do_POST(self):
try:
content_len = int(self.headers.getheader('content-length', 0))
except AttributeError: # python3 has not the getheader type, use get instead
content_len = int(self.headers.get('Content-Length'))
host = None
body = None
try:
idx = self.path.find('?')
if idx != -1:
endpoint = self.path[1:idx]
else:
endpoint = self.path[1:]
url = urlparse(endpoint)
if url.scheme == "https" and url.port == 443 or url.scheme == "http" and url.port == 80:
host = url.hostname
else:
host = url.netloc
self.headers['Host'] = host
body = self.rfile.read(content_len)
if not http_mutator:
request = urllib_request.Request(endpoint, body, headers=self.headers)
else:
request = http_mutator.build_python_request(endpoint, host, body)
contents = urlopen(request, verify=not ('http_proxy' in os.environ or 'https_proxy' in os.environ)).read()
jres = json.loads(contents)
if 'errors' in jres and len(jres['errors']) > 0 and "IntrospectionQuery" in body:
raise Exception("IntrospectionQuery request contains errors")
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(contents)
except Exception as ex:
if host and http_mutator and http_mutator.get_stub_response(host) and "IntrospectionQuery" in body:
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(http_mutator.get_stub_response(host))
return
print(ex)
self.send_response(400)
self.send_header('Content-type', 'application/json')
self.end_headers()
try:
# Try to get the 400 page error content since it is used by the GraphiQL Console
self.wfile.write(ex.read())
except:
pass
return
def do_PUT(self):
try:
content_len = int(self.headers.getheader('content-length', 0))
except AttributeError: # python3 has not the getheader type, use get instead
content_len = int(self.headers.get('Content-Length'))
if http_mutator:
body = self.rfile.read(content_len)
url = urlparse(self.path[1:])
if url.scheme == "https" and url.port == 443 or url.scheme == "http" and url.port == 80:
host = url.hostname
else:
host = url.netloc
http_mutator.send_to_repeater(host, body)
else:
print(self.path)
print(self.rfile.read(content_len))
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
return
return GraphQLRequestHandler
class HTTPRequest(BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = BytesIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
def send_error(self, code, message):
self.error_code = code
self.error_message = message
try:
import sys
from javax.net.ssl import TrustManager, X509TrustManager
from jarray import array
from javax.net.ssl import SSLContext
class TrustAllX509TrustManager(X509TrustManager):
# Define a custom TrustManager which will blindly
# accept all certificates
def checkClientTrusted(self, chain, auth):
pass
def checkServerTrusted(self, chain, auth):
pass
def getAcceptedIssuers(self):
return None
# Create a static reference to an SSLContext which will use
# our custom TrustManager
trust_managers = array([TrustAllX509TrustManager()], TrustManager)
TRUST_ALL_CONTEXT = SSLContext.getInstance("SSL")
TRUST_ALL_CONTEXT.init(None, trust_managers, None)
# Keep a static reference to the JVM's default SSLContext for restoring
# at a later time
DEFAULT_CONTEXT = SSLContext.getDefault()
if 'create_default_context' not in dir(ssl):
SSLContext.setDefault(TRUST_ALL_CONTEXT)
except:
pass
def urlopen(request, verify):
ctx = None
if 'create_default_context' in dir(ssl):
ctx = ssl.create_default_context()
elif 'SSLContext' in dir(ssl) and 'PROTOCOL_TLSv1' in dir(ssl):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
if not verify and ctx:
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
return urllib_request.urlopen(request, context=ctx)
else:
return urllib_request.urlopen(request)
def _recursive_name_get(obj):
try:
return obj['name'] or _recursive_name_get(obj['ofType'])
except KeyError:
return False
def _recursive_kind_of(obj, target):
try:
return obj['kind'] == target or _recursive_kind_of(obj['ofType'], target)
except KeyError:
return False
except TypeError:
return False
def is_query(body):
# FIXME: handle urlencoded requests too in the future
try:
content = json.loads(body)
if not isinstance(content, list):
content = [content]
ret = all(['query' in c or 'operationName' in c
for c in content])
return ret
except:
return False
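# Hedged example added for illustration (not part of the original module):
# is_query() accepts both a single GraphQL request body and a batched list of
# request objects; the queries below are made-up.
def _is_query_example():
    single = '{"query": "{ __typename }"}'
    batch = '[{"operationName": "A", "query": "query A { __typename }"}]'
    return is_query(single) and is_query(batch)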
def simplify_introspection(data):
"""
Generates a simplified introspection object based on an introspection query.
    This utility function is later used by many of the generators.
# Parsing JSON response/file structure as follows
# data
# __schema
# directives
# mutationType
# queryType
# subscriptionType
# types (kind, name, description)
# name (RootQuery, RootMutation, Subscriptions, [custom] OBJECT)
# fields
# name (query names)
# args
# name (args names)
# type
# name (args types)
:type data: an introspection query dict
"""
output = {}
output['schema'] = {}
schema = data['data']['__schema']
# Get the Root query type
if schema['queryType'] and 'name' in schema['queryType']:
output['schema']['query'] = {
"type": schema['queryType']['name'],
"array": False,
"required": False
}
# Get the Root subscription type
if schema['subscriptionType'] and 'name' in schema['subscriptionType']:
output['schema']['subscription'] = {
"type": schema['subscriptionType']['name'],
"array": False,
"required": False
}
# Get the Root mutation type
if schema['mutationType'] and 'name' in schema['mutationType']:
output['schema']['mutation'] = {
"type": schema['mutationType']['name'],
"array": False,
"required": False
}
# Go over all the fields and simplify the JSON
output['type'] = {}
for type in schema['types']:
if type['name'][0:2] == '__': continue
if type['kind'] == 'OBJECT':
output['type'][type['name']] = {}
if type['fields']:
for field in type['fields']:
output['type'][type['name']][field['name']] = {
"type": _recursive_name_get(field['type']),
"required": field['type']['kind'] == 'NON_NULL',
"array": _recursive_kind_of(field['type'], 'LIST'),
}
if field['args']:
output['type'][type['name']][field['name']]["args"] = {}
for arg in field['args']:
output['type'][type['name']][field['name']]['args'][arg['name']] = {
"type": _recursive_name_get(arg['type']),
"required": arg['type']['kind'] == 'NON_NULL',
"array": _recursive_kind_of(arg['type'], 'LIST'),
}
if arg['defaultValue'] != None:
output['type'][type['name']][field['name']]['args'][arg['name']]['default'] = arg[
'defaultValue']
if type['interfaces']:
output['type'][type['name']]['__implements'] = {}
for iface in type['interfaces']:
output['type'][type['name']]['__implements'][iface['name']] = {}
if 'type' not in output['type'][type['name']] and 'args' in output['type'][type['name']]:
output['type'][type['name']]["type"] = output['type'][type['name']]["args"]["type"]
# Get all the Enums
output['enum'] = {}
for type in schema['types']:
if type['name'][0:2] == '__': continue
if type['kind'] == 'ENUM':
output['enum'][type['name']] = {}
for v in type['enumValues']:
output['enum'][type['name']][v['name']] = {}
# Get all the Scalars
output['scalar'] = {}
for type in schema['types']:
if type['name'][0:2] == '__': continue
if type['kind'] == 'SCALAR' and type['name'] not in ['String', 'Int', 'Float', 'Boolean', 'ID']:
output['scalar'][type['name']] = {}
# Get all the inputs
output['input'] = {}
for type in schema['types']:
if type['name'][0:2] == '__': continue
if type['kind'] == 'INPUT_OBJECT':
output['input'][type['name']] = {}
if type['inputFields']:
for field in type['inputFields']:
output['input'][type['name']][field['name']] = {
"type": _recursive_name_get(field['type']),
"required": field['type']['kind'] == 'NON_NULL',
"array": _recursive_kind_of(field['type'], 'LIST'),
}
# Get all the unions
output['union'] = {}
for type in schema['types']:
if type['name'][0:2] == '__': continue
if type['kind'] == 'UNION':
output['union'][type['name']] = {}
for v in type['possibleTypes']:
output['union'][type['name']][v['name']] = {}
# Get all the interfaces
output['interface'] = {}
for type in schema['types']:
if type['name'][0:2] == '__': continue
if type['kind'] == 'INTERFACE':
output['interface'][type['name']] = {}
if type['fields']:
for field in type['fields']:
output['interface'][type['name']][field['name']] = {
"type": _recursive_name_get(field['type']),
"required": field['type']['kind'] == 'NON_NULL',
"array": _recursive_kind_of(field['type'], 'LIST'),
}
if field['args']:
output['interface'][type['name']][field['name']]["args"] = {}
for arg in field['args']:
output['interface'][type['name']][field['name']]['args'][arg['name']] = {
"type": _recursive_name_get(arg['type']),
"required": arg['type']['kind'] == 'NON_NULL',
"array": _recursive_kind_of(arg['type'], 'LIST'),
}
if arg['defaultValue'] != None:
output['interface'][type['name']][field['name']]['args'][arg['name']]['default'] = arg[
'defaultValue']
if 'type' not in output['interface'][type['name']] and 'args' in output['interface'][type['name']]:
output['interface'][type['name']]["type"] = output['interface'][type['name']]["args"]["type"]
return output
def raw_request(request):
"""
At this point it is completely built and ready
to be fired; it is "prepared".
    However, pay attention to the formatting used in
    this function because it is meant to be pretty
    printed and may differ from the actual request.
"""
headers = request.headers.copy()
if 'Connection' not in headers:
headers['Connection'] = 'close'
if 'User-Agent' not in headers:
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0'
if 'Accept-Encoding' not in headers:
headers['Accept-Encoding'] = 'gzip, deflate'
url = urlparse(request.get_full_url())
headers['Host'] = url.netloc
path = url.path if len(url.path) else '/'
return '{}\r\n{}\r\n\r\n{}'.format(
request.get_method() + ' ' + path + ' HTTP/1.1',
'\r\n'.join('{}: {}'.format(k, v) for k, v in headers.items()),
request.data if request.data else '',
)
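# Hedged usage sketch added for illustration (not part of the original module);
# the endpoint URL and body are placeholders. raw_request() pretty-prints a
# urllib request object as an HTTP/1.1 message.
def _raw_request_example():
    req = urllib_request.Request('http://example.com/graphql',
                                 b'{"query": "{ __typename }"}',
                                 headers={'Content-Type': 'application/json'})
    return raw_request(req)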
|
run_ex1.py
|
import os, sys, time
import threading, urllib.request, urllib.error, urllib.parse
from pyraft import raft
def url_checker(node):
while not node.shutdown_flag:
time.sleep(5)
if node.state != 'l':
continue
for k, v in node.data.items():
if not k.startswith('url_'):
continue
try:
url = v
if not v.startswith('http'):
url = 'https://' + v
result = urllib.request.urlopen(url).read()
print('url %s is ok' % k)
except Exception as e:
print('url %s is bad - %s' % (k, e))
def url_check_start(node):
print('url check start...')
if not hasattr(node, 'checker'):
node.checker = threading.Thread(target=url_checker, args=(node,))
node.checker.start()
node = raft.make_default_node()
node.worker.handler['on_leader'] = url_check_start
node.start()
node.join()
|
Loader.py
|
# import os
# import sys
# from threading import Thread
# from queue import Queue
#
# import cv2
# import scipy.misc
# import numpy as np
#
# from CalibrateTransfer.img_operation import ScreenSHot
# from CalibrateTransfer.data_preprocess import write_data_to_json_file,read_data_from_json_file,make_dir,read_subdata,read_stack_data
# from CalibrateTransfer.cv_transfer import transform_2d_to_3d,object_To_pixel,updata_img_point
# from CalibrateTransfer.img_operation import GenerateRect
#
# import torch
# import torch.multiprocessing as mp
#
# from FairMot.track import Short_track_eval
#
# class LoadShortCutVideo: # for short tracking
# def __init__(self,video, video_time, rect, Output_size, img_size=(1088, 608), multiple = 2):
#
# self.cap = video
# self.cap.set(cv2.CAP_PROP_POS_MSEC,round(1000*video_time)) # 将视频设置到动作发生的时间
# self.current_frame_index = self.cap.get(cv2.CAP_PROP_POS_FRAMES) # 动作发生的时间对应的帧
# self.multiple = multiple # 获取 2 * multiple倍 的视频帧率长度的图片
# self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS))) # 计算帧率
# self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.current_frame_index - multiple*self.frame_rate) # 将视频向前调整 multiple 秒
#
# self.width, self.height = img_size[0] , img_size[1] # 网络输入的Feature Map的大小
# [self.vw, self.vh] = Output_size # 输入图片的大小
# [self.w, self.h] = Output_size # 可视化的图片的大小
#
# self.rect = rect # 对应的目标区域 [x_l,y_l,x_r,y_r]
# self.count = 0
# self.vn = 2 *multiple * self.frame_rate + 1
# print('Lenth of the video: {:d} frames'.format(self.vn))
#
# def get_size(self, vw, vh, dw, dh):
# wa, ha = float(dw) / vw, float(dh) / vh
# a = min(wa, ha)
# return int(vw * a), int(vh * a)
#
# def __iter__(self):
# self.count = -1
# return self
#
# def __next__(self):
# # Read image
# res, img0 = self.cap.read() # BGR
# assert img0 is not None, 'Failed to load frame {:d}'.format(self.count)
# # 裁剪图片
# img0 = img0[self.rect[1]:self.rect[3],self.rect[0]:self.rect[2]]
# # Normalize RGB
# img = img0[:, :, ::-1].transpose(2, 0, 1)
# img = np.ascontiguousarray(img, dtype=np.float32)
# img /= 255.0
# # cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
# self.count += 1
# if self.count == len(self):
# raise StopIteration
#
# return self.count, img, img0
#
# def __len__(self):
# return self.vn # number of files
#
# class ImgSequenceLoader:
# def __init__(self, opt, dataloder, queueSize=1000, sp=False):
#
# self.dir_name = opt.dir_name
# self.root_path = os.path.join(opt.data_root, '{}'.format(opt.dir_name))
# # logger.info('目标文件夹是{}'.format(root_path))
#
# self.file_name = opt.file_name
# # 本来就是要载入两次视频,分开读亦可以
# self.Videoparameters, \
# self.setting_parameter, \
# self.action_datas, \
# self.channel_list, \
# self.parameter = read_data_from_json_file(self.root_path, self.file_name, opt)
#
# self.stopped = False
# self.datalen = len(self.action_datas)
#
#
# # initialize the queue used to store frames read from
# # the video file
# self.sp = sp
# if sp:
# self.Q = Queue(maxsize=queueSize)
# else:
# self.Q = mp.Queue(maxsize=queueSize)
#
# def start(self):
# # start a thread to read frames from the file video stream
# if self.sp:
# t = Thread(target=self.update, args=())
# t.daemon = True
# t.start()
# else:
# p = mp.Process(target=self.update, args=())
# p.daemon = True
# p.start()
# return self
#
# def update(self):
# # keep looping the whole dataset
#
#
# for index in range(self.datalen):
#
# # result_root = make_dir(self.root_path, index, Secondary_directory='{}_short_tracking'.format(self.dir_name))
#
# '''read each item from subdata of action datas according to the index '''
# channel, action_time, img_point, video_parameter = read_subdata(self.action_datas[index], self.Videoparameters)
#
# video = video_parameter['video']
# # action time need to add the delta time to calibrate the time between channels .
# video_time = action_time + video_parameter['delta_t']
# width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
# height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
# Message = GenerateRect(img_point, self.setting_parameter['Output_size'], self.setting_parameter['bias'], width,
# height)
#
# if Message[0] == True:
# # 获取目标区域
# rect = Message[1]
# x_l = int(rect[0])
# y_l = int(rect[1])
# x_r = int(rect[2] + rect[0])
# y_r = int(rect[3] + rect[1])
# rect = [x_l, y_l, x_r, y_r]
# # 目标点坐标相对于从原图中的位置,更新到相对于截图中的位置
# reference_point = (int(img_point[0] - x_l), int(img_point[1] - y_l))
# # sub_img = img[y_l:y_r, x_l:x_r]
# else:
# # 如果没有截图则,则无需放入Queue中。
# self.Q.put(None,None,None)
# continue
#
# # logger.info('Starting tracking...')
# dataloader = LoadShortCutVideo(video, video_time, rect, self.setting_parameter['Output_size'])
# target_frame = dataloader.multiple * dataloader.frame_rate
#
# # result_filename = os.path.join(result_root, '..', '{}.txt'.format(index))
# frame_rate = dataloader.frame_rate
#
# img, orig_img, im_name, im_dim_list = self.dataloder.getitem(
# self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[: ,0 ]= =k], inps, pt1, pt2))
#
# def read(self):
# # return next frame in the queue
# return self.Q.get()
#
# def len(self):
# # return queue len
# return self.Q.qsize()
|
gui.py
|
'''File containing the class controlling the GUI'''
import Tkinter as tk
from threading import Thread
from Queue import Queue
from time import sleep
'''
GUI object that uses Tkinter to display information from the robot\'s sensors.
Additionally, the GUI acts as the main thread and the wrapper to connect the other objects.
'''
class GUI(object):
    '''
    Initialises the GUI.
    Takes the robot_handler as the first argument.
    Takes the grid (which provides canvas_width and canvas_height) as the second argument.
    '''
def __init__(self, robot_handler, grid):
# Initialise variables
self.root = tk.Tk()
self.canvas_width, self.canvas_height = grid.canvas_width, grid.canvas_height
self.canvas = tk.Canvas(self.root, width = self.canvas_width, height = self.canvas_height)
self.graph = grid
self.robot = robot_handler
self.nodes = {}
self.current_location_marker = None
# Initialise buttons
self.frame = tk.Frame(self.root)
self.map_btn = tk.Button(self.frame, text = 'Map')
self.map_btn.bind('<Button-1>', self.map)
self.localise_btn = tk.Button(self.frame, text = 'Localise')
self.localise_btn.bind('<Button-1>', self.localise)
self.return_btn = tk.Button(self.frame, text = 'Return')
self.return_btn.bind('<Button-1>', self.return_to_start)
self.stop_btn = tk.Button(self.frame, text = 'Stop')
self.stop_btn.bind('<Button-1>', self.stop)
self.map_btn.pack(side = 'left')
self.localise_btn.pack(side = 'left')
self.return_btn.pack(side = 'left')
self.stop_btn.pack(side = 'left')
# Initialise threads
self.main_thread = Thread(target = self.main)
self.main_thread.daemon = True
self.mapping_thread = None
self.localising_thread = None
self.returning_thread = None
self.display_thread = Thread(target = self.update_graph)
self.display_thread.daemon = True
'''Starts the Tkinter process.'''
def start(self):
self.canvas.pack()
self.frame.pack()
self.main_thread.start()
self.root.mainloop()
'''Main process.'''
def main(self):
self.graph.set_grid_rows(5)
self.graph.set_grid_cols(5)
self.graph.set_start((self.graph.grid_rows / 2, self.graph.grid_columns / 2))
self.graph.make_grid()
self.graph.compute_node_locations()
self.display_graph()
self.display_thread.start()
'''Start mapping the surrounding environment.'''
def map(self, event = None):
if not self.graph.mapping:
self.graph.mapping = True
self.mapping_thread = Thread(target = self.graph.map, args=(self.robot,))
self.mapping_thread.daemon = True
self.mapping_thread.start()
'''Start localisation.'''
def localise(self, event = None):
if not self.graph.localising:
self.graph.localising = True
self.localising_thread = Thread(target = self.graph.localise, args=(self.robot,))
self.localising_thread.daemon = True
self.localising_thread.start()
'''Start returning to start position.'''
def return_to_start(self, event = None):
if not self.graph.returning:
self.graph.returning = True
self.returning_thread = Thread(target = self.graph.return_to_start, args=(self.robot,))
self.returning_thread.daemon = True
self.returning_thread.start()
'''Stop every single process.'''
def stop(self, event = None):
try:
self.graph.mapping = False
self.mapping_thread.join()
self.graph.localising = False
self.localising_thread.join()
self.graph.returning = False
self.returning_thread.join()
except RuntimeError:
pass
except AttributeError:
pass
'''Update the graph with the current obstacle list.'''
def update_graph(self):
while True:
for node in self.graph.nodes:
if node == self.graph.start_node:
self.canvas.itemconfig(self.nodes[node], fill = '#f00')
elif node in self.graph.obs_list:
self.canvas.itemconfig(self.nodes[node], fill = '#0f0')
else:
self.canvas.itemconfig(self.nodes[node], fill = '#00f')
if node == self.graph.current_location:
self.canvas.coords(self.current_location_marker, self.graph.node_display_locations[node][0], self.graph.node_display_locations[node][1])
self.canvas.itemconfig(self.current_location_marker, text = {
'up': '^',
'right': '>',
'left': '<',
'down': 'v'
}[self.graph.current_direction])
sleep(0.1)
'''Display the graph on the Tkinter canvas.'''
def display_graph(self):
for node in self.graph.nodes:
for neighbour in self.graph.nodes[node]:
self.canvas.create_line(self.graph.node_display_locations[node][0], self.graph.node_display_locations[node][1], self.graph.node_display_locations[neighbour][0], self.graph.node_display_locations[neighbour][1], width = 2)
for node in self.graph.nodes:
x_top_left = int(self.graph.node_display_locations[node][0] - 0.5 * self.graph.column_width)
y_top_left = int(self.graph.node_display_locations[node][1] - 0.5 * self.graph.row_height)
x_bottom_right = int(self.graph.node_display_locations[node][0] + 0.5 * self.graph.column_width)
y_bottom_right = int(self.graph.node_display_locations[node][1] + 0.5 * self.graph.row_height)
if node == self.graph.start_node:
self.nodes[node] = self.canvas.create_oval(x_top_left, y_top_left, x_bottom_right, y_bottom_right, outline = '#000', fill = '#f00', width = 2)
elif node in self.graph.obs_list:
self.nodes[node] = self.canvas.create_oval(x_top_left, y_top_left, x_bottom_right, y_bottom_right, outline = '#000', fill = '#0f0', width = 2)
else:
self.nodes[node] = self.canvas.create_oval(x_top_left, y_top_left, x_bottom_right, y_bottom_right, outline = '#000', fill = '#00f', width = 2)
self.current_location_marker = self.canvas.create_text(self.graph.node_display_locations[self.graph.start_node][0], self.graph.node_display_locations[self.graph.start_node][1], text = '^', fill = '#fff', font = ('Arial', 30))
'''Highlight the graph based on the path.'''
def highlight_path(self, path):
for node in path:
if not node == self.graph.start_node and not node == self.graph.goal_node:
self.canvas.itemconfig(self.nodes[node], fill = '#ff0')
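# Hedged usage sketch added for illustration; RobotHandler and Grid are
# placeholders for whatever robot interface and grid/graph objects the rest of
# the project provides (they are not defined in this file):
#
#   robot = RobotHandler()
#   grid = Grid()
#   gui = GUI(robot, grid)
#   gui.start()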
|
bulk_decompile.py
|
'''
A helper script that takes all the binary code from cache_code,
and decompiles all of it, saving results into cache_pan
It uses multi-processing.
This is useful with testing new changes to the decompiler. Run the decompilation
on a few hundred contracts and look for any serious bugs / weird results.
I also use it to decompile all the contracts with a new Eveem release.
Just fetch all the bytecodes into cache_code (e.g. from BigQuery), and then run
it through all of them. The strongest AWS instance can handle ~100 processes,
and after ~24h it should have all the bytecodes decompiled.
It would be rather easy to optimise this with some kind of a database and
bytecode deduplication, but it would make the code more complex and dependency-ridden.
'''
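# Illustrative invocation (added as an example; tune the range and thread count
# to your machine):
#   python3 bulk_decompile.py 0 500 16
# decompiles the first 500 addresses from the selected list using 16 worker
# threads; pass --force to re-decompile contracts that already have a cached result.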
import json
from subprocess import call
from queue import Queue
import sys
import threading
import time
import logging
import os
from various import addr_list, random_addresses
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) %(message)s',)
stuff = []
path = 'cache_code/'
'''
uncomment to decompile all contracts in cache_code
for dname in os.listdir(path):
if not os.path.isdir(path+dname):
continue
for fname in os.listdir(path+dname):
addr = fname[:-4]
full_fname = path+dname+'/'+fname
if os.stat(full_fname).st_size > 0:
stuff.append(addr)
'''
stuff = random_addresses # or addr_list for more complex examples
print('binaries found:', len(stuff))
if len(sys.argv) < 4:
print("bulk_decompile start_loc end_loc num_threads [--force]")
exit()
def queued(q):
while True:
addr = q.get()
if addr == 'die':
logging.debug('end of queue')
break
logging.debug('addr: %s' % addr)
call(['python3.8','panoramix.py', addr])#, '--upload'])
stuff = sorted(stuff)
if __name__ == '__main__':
queue = Queue()
threads = []
for i in range(int(sys.argv[3])):
t = threading.Thread(target=queued, name='thread_'+str(i), args=[queue])
t.start()
threads.append(t)
mini_queue = []
for addr in stuff[int(sys.argv[1]):int(sys.argv[2])]:
if '--force' not in sys.argv and os.path.isfile('cache_pan/'+addr[:5]+'/'+addr+'.pan'):
print('skipping '+addr)
continue
mini_queue.append(addr)
if len(mini_queue) > 10:
queue.put(','.join(mini_queue))
mini_queue = []
queue.put(','.join(mini_queue))
for i in range(int(sys.argv[3])):
queue.put('die')
print('waiting for threads..')
|